Merge tag 'mac80211-next-for-davem-2015-04-10' of git://git.kernel.org/pub/scm/linux...
author David S. Miller <davem@davemloft.net>
Mon, 13 Apr 2015 00:43:46 +0000 (20:43 -0400)
committer David S. Miller <davem@davemloft.net>
Mon, 13 Apr 2015 00:43:46 +0000 (20:43 -0400)
Johannes Berg says:

====================
There isn't much left, but we have
 * new mac80211 internal software queue to allow drivers to have
   shorter hardware queues and pull on-demand
 * use rhashtable for mac80211 station table
 * minstrel rate control debug improvements and some refactoring
 * fix noisy message about TX power reduction
 * fix continuous message printing and activity if CRDA doesn't respond
 * fix VHT-related capabilities with "iw connect" or "iwconfig ..."
 * fix Kconfig for cfg80211 wireless extensions compatibility
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
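
The first item in the summary above is the mac80211 intermediate software queue work: mac80211 now buffers frames on a per-TXQ queue and the driver pulls them on demand when it has hardware queue space. The sketch below is a hedged illustration of the driver side only; the wake_tx_queue callback and ieee80211_tx_dequeue() follow the mac80211 changes in this pull, while my_hw_queue_space() and my_hw_tx() are hypothetical placeholder helpers, not part of any real driver.

    /* Hedged sketch (not from this merge): pull frames on demand from
     * mac80211's per-TXQ software queue instead of queueing deeply in
     * hardware.  my_hw_queue_space() and my_hw_tx() are made-up helpers. */
    #include <net/mac80211.h>

    static void my_wake_tx_queue(struct ieee80211_hw *hw,
                                 struct ieee80211_txq *txq)
    {
            struct sk_buff *skb;

            while (my_hw_queue_space(hw) > 0) {
                    skb = ieee80211_tx_dequeue(hw, txq);  /* new mac80211 helper */
                    if (!skb)
                            break;                        /* software queue empty */
                    my_hw_tx(hw, txq->vif, txq->sta, skb);
            }
    }

    /* hooked up in the driver's ops:  .wake_tx_queue = my_wake_tx_queue, */
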
818 files changed:
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/net/nfc/nxp-nci.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/input/alps.txt
Documentation/input/event-codes.txt
Documentation/input/multi-touch-protocol.txt
Documentation/networking/can.txt
Documentation/networking/igb.txt
Documentation/networking/ixgb.txt
Documentation/networking/ixgbe.txt
MAINTAINERS
Makefile
arch/arc/kernel/signal.c
arch/arm/Kconfig
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/mach-omap2/id.c
arch/arm/mach-pxa/irq.c
arch/arm/mach-pxa/zeus.c
arch/arm/mach-sunxi/Kconfig
arch/arm/plat-omap/dmtimer.c
arch/arm64/boot/dts/arm/juno-clocks.dtsi
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/percpu.h
arch/metag/include/asm/io.h
arch/metag/include/asm/pgtable-bits.h [new file with mode: 0644]
arch/metag/include/asm/pgtable.h
arch/parisc/include/asm/pgalloc.h
arch/parisc/kernel/syscall_table.S
arch/powerpc/include/asm/cputhreads.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/dbell.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/mobility.c
arch/s390/include/asm/elf.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/swsusp_asm64.S
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/kgdb.c
arch/x86/kernel/reboot.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx.c
arch/x86/xen/p2m.c
block/blk-merge.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-settings.c
crypto/af_alg.c
crypto/algif_skcipher.c
drivers/ata/libata-core.c
drivers/base/regmap/internal.h
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap.c
drivers/bcma/Kconfig
drivers/bcma/bcma_private.h
drivers/bcma/driver_gpio.c
drivers/bcma/driver_pci.c
drivers/bcma/host_pci.c
drivers/block/nbd.c
drivers/block/nvme-core.c
drivers/bluetooth/Kconfig
drivers/bluetooth/Makefile
drivers/bluetooth/btbcm.c [new file with mode: 0644]
drivers/bluetooth/btbcm.h [new file with mode: 0644]
drivers/bluetooth/btintel.c [new file with mode: 0644]
drivers/bluetooth/btintel.h [new file with mode: 0644]
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ath.c
drivers/bluetooth/hci_bcm.c [new file with mode: 0644]
drivers/bluetooth/hci_bcsp.c
drivers/bluetooth/hci_h4.c
drivers/bluetooth/hci_h5.c
drivers/bluetooth/hci_intel.c [new file with mode: 0644]
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_ll.c
drivers/bluetooth/hci_uart.h
drivers/clocksource/Kconfig
drivers/clocksource/timer-sun5i.c
drivers/dma/bcm2835-dma.c
drivers/dma/dma-jz4740.c
drivers/dma/edma.c
drivers/dma/moxart-dma.c
drivers/dma/omap-dma.c
drivers/firmware/dmi_scan.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-syscon.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_mn.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/vce_v2_0.c
drivers/iio/accel/bma180.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91_adc.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/adc/vf610_adc.c
drivers/iio/gyro/bmg160.c
drivers/iio/imu/adis_trigger.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
drivers/iio/imu/kmx61.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/proximity/sx9500.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/ah.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/doorbell.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/mlx5/user.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/input/mouse/alps.c
drivers/input/mouse/synaptics.c
drivers/iommu/arm-smmu.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/irqchip/irq-gic-v3-its.c
drivers/lguest/Kconfig
drivers/md/dm.c
drivers/mfd/kempld-core.c
drivers/mfd/rtsx_usb.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_serial.c
drivers/net/can/flexcan.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_ucan.h
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/dsa/mv88e6123_61_65.c
drivers/net/dsa/mv88e6131.c
drivers/net/dsa/mv88e6171.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/fw_qos.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/fw_qos.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/mad.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/pd.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/srq.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macvlan.c
drivers/net/phy/at803x.c
drivers/net/phy/fixed_phy.c
drivers/net/usb/asix_common.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/r8152.c
drivers/net/usb/sr9800.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vxlan.c
drivers/net/wan/cosa.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ar5523/ar5523.h
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_aic.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_aic.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9003_rtt.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/reg_aic.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/wmi.c
drivers/net/wireless/ath/ath9k/wmi.h
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/rfkill.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/chip.c
drivers/net/wireless/brcm80211/brcmfmac/chip.h
drivers/net/wireless/brcm80211/brcmfmac/feature.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/brcm80211/include/chipcommon.h
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/time-event.h
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas_tf/if_usb.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00usb.h
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rc.c
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
drivers/net/wireless/rtlwifi/stats.c
drivers/net/wireless/rtlwifi/stats.h
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/wireless/ti/wlcore/debugfs.h
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/microread/i2c.c
drivers/nfc/nfcmrvl/main.c
drivers/nfc/nfcmrvl/usb.c
drivers/nfc/nxp-nci/Kconfig [new file with mode: 0644]
drivers/nfc/nxp-nci/Makefile [new file with mode: 0644]
drivers/nfc/nxp-nci/core.c [new file with mode: 0644]
drivers/nfc/nxp-nci/firmware.c [new file with mode: 0644]
drivers/nfc/nxp-nci/i2c.c [new file with mode: 0644]
drivers/nfc/nxp-nci/nxp-nci.h [new file with mode: 0644]
drivers/nfc/pn533.c
drivers/nfc/pn544/i2c.c
drivers/nfc/port100.c
drivers/nfc/st21nfca/st21nfca.c
drivers/nfc/st21nfca/st21nfca_se.c
drivers/nfc/st21nfcb/i2c.c
drivers/nfc/st21nfcb/ndlc.c
drivers/nfc/st21nfcb/st21nfcb_se.c
drivers/of/address.c
drivers/regulator/palmas-regulator.c
drivers/rtc/rtc-mrst.c
drivers/scsi/ipr.c
drivers/scsi/libsas/sas_ata.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-qup.c
drivers/spi/spi.c
drivers/ssb/driver_pcicore.c
drivers/staging/iio/Kconfig
drivers/staging/iio/magnetometer/hmc5843_core.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/samsung.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/isp1760/isp1760-udc.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan_pda.c
drivers/watchdog/imgpdc_wdt.c
drivers/watchdog/mtk_wdt.c
drivers/xen/Kconfig
drivers/xen/balloon.c
fs/affs/file.c
fs/cifs/cifsencrypt.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/compat_ioctl.c
fs/fs-writeback.c
fs/hfsplus/brec.c
fs/locks.c
fs/nfsd/blocklayout.c
fs/nfsd/blocklayoutxdr.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_pci.h
include/linux/fs.h
include/linux/irqchip/arm-gic-v3.h
include/linux/jhash.h
include/linux/lcm.h
include/linux/libata.h
include/linux/mfd/palmas.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mlx5/cmd.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/doorbell.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/srq.h
include/linux/mmc/sdio_ids.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter_arp/arp_tables.h
include/linux/netfilter_bridge.h
include/linux/netfilter_ipv4/ip_tables.h
include/linux/netfilter_ipv6/ip6_tables.h
include/linux/phy_fixed.h
include/linux/platform_data/nxp-nci.h [new file with mode: 0644]
include/linux/regulator/driver.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/sunrpc/debug.h
include/linux/tcp.h
include/linux/usb/usbnet.h
include/linux/writeback.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/dn_neigh.h
include/net/ip.h
include/net/ip6_route.h
include/net/ip6_tunnel.h
include/net/ip_tunnels.h
include/net/ipv6.h
include/net/mac802154.h
include/net/netfilter/nf_nat_l3proto.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_core.h
include/net/netfilter/nf_tables_ipv4.h
include/net/netfilter/nf_tables_ipv6.h
include/net/nfc/hci.h
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/net/rtnetlink.h
include/net/sock.h
include/net/tcp.h
include/net/udp_tunnel.h
include/net/vxlan.h
include/net/xfrm.h
include/trace/events/regmap.h
include/uapi/linux/bpf.h
include/uapi/linux/can/raw.h
include/uapi/linux/input.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nfsd/export.h
include/uapi/linux/rtnetlink.h
kernel/events/core.c
kernel/locking/lockdep.c
kernel/module.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sysctl.c
kernel/time/tick-broadcast-hrtimer.c
lib/lcm.c
lib/nlattr.c
lib/test_rhashtable.c
mm/huge_memory.c
mm/memory.c
mm/memory_hotplug.c
mm/mmap.c
mm/mprotect.c
mm/page-writeback.c
mm/page_isolation.c
mm/pagewalk.c
mm/rmap.c
mm/slub.c
net/8021q/vlan_dev.c
net/batman-adv/hard-interface.c
net/bluetooth/bnep/bnep.h
net/bluetooth/bnep/core.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/capi.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_core.c
net/bluetooth/hci_debugfs.c
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/hci_request.h
net/bluetooth/hci_sock.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/selftest.c
net/bluetooth/smp.c
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/nf_tables_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/can/raw.c
net/core/dev.c
net/core/fib_rules.c
net/core/filter.c
net/core/link_watch.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/decnet/dn_neigh.c
net/decnet/dn_nsp_in.c
net/decnet/dn_route.c
net/decnet/dn_rules.c
net/decnet/netfilter/dn_rtmsg.c
net/dsa/dsa.c
net/dsa/slave.c
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/geneve.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipcomp.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nf_tables_arp.c
net/ipv4/netfilter/nf_tables_ipv4.c
net/ipv4/netfilter/nft_chain_nat_ipv4.c
net/ipv4/netfilter/nft_chain_route_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_diag.c
net/ipv4/udp_offload.c
net/ipv4/udp_tunnel.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nf_tables_ipv6.c
net/ipv6/netfilter/nft_chain_nat_ipv6.c
net/ipv6/netfilter/nft_chain_route_ipv6.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/output_core.c
net/ipv6/raw.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_output.c
net/iucv/af_iucv.c
net/l2tp/l2tp_core.c
net/mac80211/agg-rx.c
net/mac80211/rx.c
net/mac80211/sta_info.h
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_internals.h
net/netfilter/nf_log_common.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nft_compat.c
net/netfilter/nft_ct.c
net/netfilter/nft_dynset.c [new file with mode: 0644]
net/netfilter/nft_hash.c
net/netfilter/nft_lookup.c
net/netfilter/nft_meta.c
net/netfilter/xt_cgroup.c
net/netfilter/xt_physdev.c
net/netfilter/xt_socket.c
net/nfc/nci/core.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/sched/sch_fq.c
net/sunrpc/clnt.c
net/sunrpc/debugfs.c
net/sunrpc/sunrpc_syms.c
net/sunrpc/xprt.c
net/tipc/bcast.c
net/tipc/core.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/node.c
net/tipc/udp_media.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/bpf_helpers.h
samples/bpf/tcbpf1_kern.c [new file with mode: 0644]
security/selinux/hooks.c
security/selinux/nlmsgtab.c
security/selinux/selinuxfs.c
security/smack/smack_netfilter.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
tools/testing/selftests/Makefile
virt/kvm/kvm_main.c

diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index e124847443f87c9465ec82dc03ce792d2d1cf5c4..f0b4cd72411d66bf12aeec3d64335d6641b26674 100644 (file)
@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
 (DSA_MAX_SWITCHES).
 Each of these switch child nodes should have the following required properties:
 
-- reg                  : Describes the switch address on the MII bus
+- reg                  : Contains two fields. The first one describes the
+                         address on the MII bus. The second is the switch
+                         number that must be unique in cascaded configurations
 - #address-cells       : Must be 1
 - #size-cells          : Must be 0
 
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
new file mode 100644 (file)
index 0000000..5b6cd9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -0,0 +1,35 @@
+* NXP Semiconductors NXP NCI NFC Controllers
+
+Required properties:
+- compatible: Should be "nxp,nxp-nci-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- enable-gpios: Output GPIO pin used for enabling/disabling the chip
+- firmware-gpios: Output GPIO pin used to enter firmware download mode
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
+
+&i2c2 {
+
+       status = "okay";
+
+       npc100: npc100@29 {
+
+               compatible = "nxp,nxp-nci-i2c";
+
+               reg = <0x29>;
+               clock-frequency = <100000>;
+
+               interrupt-parent = <&gpio1>;
+               interrupts = <29 GPIO_ACTIVE_HIGH>;
+
+               enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
+               firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 8ca65cec52ae8cc25e8231891a159c9525297391..29aca8591b163070211e29f02ea7608c19ca66a8 100644 (file)
@@ -35,10 +35,11 @@ Optional properties:
 - reset-names: Should contain the reset signal name "stmmaceth", if a
        reset phandle is given
 - max-frame-size: See ethernet.txt file in the same directory
-- clocks: If present, the first clock should be the GMAC main clock,
-  further clocks may be specified in derived bindings.
+- clocks: If present, the first clock should be the GMAC main clock and
+  the second clock should be the peripheral's register interface clock. Further
+  clocks may be specified in derived bindings.
 - clock-names: One name for each entry in the clocks property, the
-  first one should be "stmmaceth".
+  first one should be "stmmaceth" and the second one should be "pclk".
 - clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
   available this clock is used for programming the Timestamp Addend Register.
   If not passed then the system clock will be used and this is fine on some
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index a63e5e013a8cddee63b1d3520dd1c2c73e80dc31..92ae734c00c348ab810373e0dc838a92462c932f 100644 (file)
@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
  byte 4:  0   y6   y5   y4   y3   y2   y1   y0
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
+Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+the DualPoint Stick.
+
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
 
@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
  byte 7:    0   y6   y5   y4   y3   y2   y1   y0
  byte 8:    0   z6   z5   z4   z3   z2   z1   z0
 
+Devices which use the interleaving format normally send standard PS/2 mouse
+packets for the DualPoint Stick + ALPS Absolute Mode packets for the
+touchpad, switching to the interleaved packet format when both the stick and
+the touchpad are used at the same time.
+
 ALPS Absolute Mode - Protocol Version 3
 ---------------------------------------
 
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index c587a966413e8597da3241f851db92f6e6df1d81..96705616f5820a6d48d6cfda497c7fcf30917e2a 100644 (file)
@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
 The kernel does not provide button emulation for such devices but treats
 them as any other INPUT_PROP_BUTTONPAD device.
 
+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
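
As a hedged illustration of the new property (this snippet is not part of the patch), a driver exposing a pure accelerometer event node could mark it roughly as below in its setup path; the axis range is an arbitrary placeholder.

    #include <linux/input.h>

    /* Sketch: declare that ABS_X/Y/Z on this node carry accelerometer data,
     * so userspace does not treat them as pointer movement.  The range is
     * a placeholder. */
    static void my_accel_setup(struct input_dev *input_dev)
    {
            __set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
            input_set_abs_params(input_dev, ABS_X, -32768, 32767, 0, 0);
            input_set_abs_params(input_dev, ABS_Y, -32768, 32767, 0, 0);
            input_set_abs_params(input_dev, ABS_Z, -32768, 32767, 0, 0);
    }
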
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 7b4f59c09ee2301d077446f5f9ce9c2b96aa2551..b85d000faeb4067c9ab1ed06690105d459a40a4e 100644 (file)
@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
 
 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.
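
As a hedged illustration of the type B flow above (not part of the patch), a slotted driver whose firmware classifies a contact as a palm could report it as below; slot, x and y stand in for values taken from the hardware report.

    #include <linux/input/mt.h>

    /* Sketch: report one type B contact as MT_TOOL_PALM; the input core
     * translates this into ABS_MT_TOOL_TYPE for the active slot. */
    static void my_report_palm(struct input_dev *dev, int slot, int x, int y)
    {
            input_mt_slot(dev, slot);
            input_mt_report_slot_state(dev, MT_TOOL_PALM, true);
            input_report_abs(dev, ABS_MT_POSITION_X, x);
            input_report_abs(dev, ABS_MT_POSITION_Y, y);
            input_mt_sync_frame(dev);
            input_sync(dev);
    }
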
 
 ABS_MT_BLOB_ID
 
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 0a2859a8ee7ec66e6f2da253ca345b9d0f6d4ed1..5abad1e921ca810c1e765d1d84d1ca4a50ce5016 100644 (file)
@@ -22,7 +22,8 @@ This file contains
       4.1.3 RAW socket option CAN_RAW_LOOPBACK
       4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
       4.1.5 RAW socket option CAN_RAW_FD_FRAMES
-      4.1.6 RAW socket returned message flags
+      4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
+      4.1.7 RAW socket returned message flags
     4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
       4.2.1 Broadcast Manager operations
       4.2.2 Broadcast Manager message flags
@@ -601,7 +602,22 @@ solution for a couple of reasons:
   CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
   The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
 
-  4.1.6 RAW socket returned message flags
+  4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
+
+  The CAN_RAW socket can set multiple CAN identifier specific filters that
+  lead to multiple filters in the af_can.c filter processing. These filters
+  are independent of each other, which results in logically OR'ed filters when
+  applied (see 4.1.1).
+
+  This socket option joins the given CAN filters so that only CAN frames
+  that match *all* given CAN filters are passed to user space. The
+  semantics of the applied filters are therefore changed to a logical AND.
+
+  This is especially useful when the filter set is a combination of filters
+  where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
+  CAN ID ranges from the incoming traffic.
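
As a hedged userspace sketch of the semantics described above (not part of the patch; the interface name "can0" and the filter values are arbitrary): with CAN_RAW_JOIN_FILTERS set, the two filters below pass only frames with IDs 0x100-0x1FF that are not exactly 0x123, whereas with the default OR semantics the inverted filter alone would pass almost all traffic.

  #include <linux/can.h>
  #include <linux/can/raw.h>
  #include <net/if.h>
  #include <sys/socket.h>
  #include <unistd.h>

  /* Sketch: open a RAW CAN socket whose filters are AND'ed together. */
  int open_joined_filter_socket(void)
  {
          struct can_filter flt[2] = {
                  { .can_id = 0x100,                  .can_mask = 0x700 },        /* IDs 0x100-0x1ff */
                  { .can_id = 0x123 | CAN_INV_FILTER, .can_mask = CAN_SFF_MASK }, /* ...but not 0x123 */
          };
          struct sockaddr_can addr = { .can_family = AF_CAN };
          int join = 1;
          int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

          if (s < 0)
                  return -1;

          setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, flt, sizeof(flt));
          setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS, &join, sizeof(join));

          addr.can_ifindex = if_nametoindex("can0");    /* example interface */
          if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                  close(s);
                  return -1;
          }
          return s;
  }
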
+
+  4.1.7 RAW socket returned message flags
 
   When using recvmsg() call, the msg->msg_flags may contain following flags:
 
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
index 43d3549366a09d6cfbb7e4b284bb01d04fd314a0..15534fdd09a88f97a8d9c20b72666d6eff93dd0d 100644 (file)
@@ -42,10 +42,10 @@ Additional Configurations
   Jumbo Frames
   ------------
   Jumbo Frames support is enabled by changing the MTU to a value larger than
-  the default of 1500.  Use the ifconfig command to increase the MTU size.
+  the default of 1500.  Use the ip command to increase the MTU size.
   For example:
 
-       ifconfig eth<x> mtu 9000 up
+       ip link set dev eth<x> mtu 9000
 
   This setting is not saved across reboots.
 
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index 1e0c045e89f7dc12f5c2ccf016020e6e2fdb2857..9b4a10a1cf50b4513eecaa6e638051a356c34efa 100644 (file)
@@ -39,7 +39,7 @@ Channel Bonding documentation can be found in the Linux kernel source:
 
 The driver information previously displayed in the /proc filesystem is not
 supported in this release.  Alternatively, you can use ethtool (version 1.6
-or later), lspci, and ifconfig to obtain the same information.
+or later), lspci, and iproute2 to obtain the same information.
 
 Instructions on updating ethtool can be found in the section "Additional
 Configurations" later in this document.
@@ -90,7 +90,7 @@ select m for "Intel(R) PRO/10GbE support" located at:
 3. Assign an IP address to the interface by entering the following, where
    x is the interface number:
 
-     ifconfig ethx <IP_address>
+     ip addr add <IP_address> dev ethx
 
 4. Verify that the interface works. Enter the following, where <IP_address>
    is the IP address for another machine on the same subnet as the interface
@@ -177,7 +177,7 @@ NOTE: These changes are only suggestions, and serve as a starting point for
       tuning your network performance.
 
 The changes are made in three major ways, listed in order of greatest effect:
-- Use ifconfig to modify the mtu (maximum transmission unit) and the txqueuelen
+- Use ip link to modify the mtu (maximum transmission unit) and the txqueuelen
   parameter.
 - Use sysctl to modify /proc parameters (essentially kernel tuning)
 - Use setpci to modify the MMRBC field in PCI-X configuration space to increase
@@ -202,7 +202,7 @@ setpci -d 8086:1a48 e6.b=2e
 # to change as well.
 # set the txqueuelen
 # your ixgb adapter should be loaded as eth1 for this to work, change if needed
-ifconfig eth1 mtu 9000 txqueuelen 1000 up
+ip link set dev eth1 mtu 9000 txqueuelen 1000 up
 # call the sysctl utility to modify /proc/sys entries
 sysctl -p ./sysctl_ixgb.conf
 - END ixgb_perf.sh
@@ -297,10 +297,10 @@ Additional Configurations
   ------------
   The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
   enabled by changing the MTU to a value larger than the default of 1500.
-  The maximum value for the MTU is 16114.  Use the ifconfig command to
+  The maximum value for the MTU is 16114.  Use the ip command to
   increase the MTU size.  For example:
 
-        ifconfig ethx mtu 9000 up
+        ip link set dev ethx mtu 9000
 
   The maximum MTU setting for Jumbo Frames is 16114.  This value coincides
   with the maximum Jumbo Frames size of 16128.
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index 0ace6e776ac86d93c9f96f4ca1814eee71454624..6f0cb57b59c6b3713b3428c0dfadbbec46972ab5 100644 (file)
@@ -70,10 +70,10 @@ Avago      1000BASE-T SFP                                    ABCU-5710RZ
 82599-based adapters support all passive and active limiting direct attach
 cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
 
-Laser turns off for SFP+ when ifconfig down
+Laser turns off for SFP+ when device is down
 -------------------------------------------
-"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
-"ifconfig up" turns on the laser.
+"ip link set down" turns off the laser for 82599-based SFP+ fiber adapters.
+"ip link set up" turns on the laser.
 
 
 82598-BASED ADAPTERS
@@ -213,13 +213,13 @@ Additional Configurations
   ------------
   The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
   enabled by changing the MTU to a value larger than the default of 1500.
-  The maximum value for the MTU is 16110.  Use the ifconfig command to
+  The maximum value for the MTU is 16110.  Use the ip command to
   increase the MTU size.  For example:
 
-        ifconfig ethx mtu 9000 up
+        ip link set dev ethx mtu 9000
 
-  The maximum MTU setting for Jumbo Frames is 16110.  This value coincides
-  with the maximum Jumbo Frames size of 16128.
+  The maximum MTU setting for Jumbo Frames is 9710.  This value coincides
+  with the maximum Jumbo Frames size of 9728.
 
   Generic Receive Offload, aka GRO
   --------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index c2016557b2949f38ab66c4b46e7ee830308d4014..dcaa542524790f94e02c06f3faeb0f1c5a8be7f8 100644 (file)
@@ -637,8 +637,7 @@ F:      drivers/gpu/drm/radeon/radeon_kfd.h
 F:      include/uapi/linux/kfd_ioctl.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:     Andreas Herrmann <herrmann.der.user@googlemail.com>
-L:     amd64-microcode@amd64.org
+M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/amd*
 
@@ -1186,7 +1185,7 @@ M:        Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-mvebu/
-F:     drivers/rtc/armada38x-rtc
+F:     drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1361,7 @@ F:        drivers/i2c/busses/i2c-rk3x.c
 F:     drivers/*/*rockchip*
 F:     drivers/*/*/*rockchip*
 F:     sound/soc/rockchip/
+N:     rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1675,8 @@ F:        drivers/misc/eeprom/at24.c
 F:     include/linux/platform_data/at24.h
 
 ATA OVER ETHERNET (AOE) DRIVER
-M:     "Ed L. Cashin" <ecashin@coraid.com>
-W:     http://support.coraid.com/support/linux
+M:     "Ed L. Cashin" <ed.cashin@acm.org>
+W:     http://www.openaoe.org/
 S:     Supported
 F:     Documentation/aoe/
 F:     drivers/block/aoe/
@@ -3252,6 +3252,13 @@ S:       Maintained
 F:     Documentation/hwmon/dme1737
 F:     drivers/hwmon/dme1737.c
 
+DMI/SMBIOS SUPPORT
+M:     Jean Delvare <jdelvare@suse.de>
+S:     Maintained
+F:     drivers/firmware/dmi-id.c
+F:     drivers/firmware/dmi_scan.c
+F:     include/linux/dmi.h
+
 DOCKING STATION DRIVER
 M:     Shaohua Li <shaohua.li@intel.com>
 L:     linux-acpi@vger.kernel.org
@@ -5087,7 +5094,7 @@ S:        Supported
 F:     drivers/platform/x86/intel_menlow.c
 
 INTEL IA32 MICROCODE UPDATE SUPPORT
-M:     Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/core*
 F:     arch/x86/kernel/cpu/microcode/intel*
@@ -5128,22 +5135,21 @@ M:      Deepak Saxena <dsaxena@plexity.net>
 S:     Maintained
 F:     drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M:     Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M:     Jesse Brandeburg <jesse.brandeburg@intel.com>
-M:     Bruce Allan <bruce.w.allan@intel.com>
-M:     Carolyn Wyborny <carolyn.wyborny@intel.com>
-M:     Don Skidmore <donald.c.skidmore@intel.com>
-M:     Greg Rose <gregory.v.rose@intel.com>
-M:     Matthew Vick <matthew.vick@intel.com>
-M:     John Ronciak <john.ronciak@intel.com>
-M:     Mitch Williams <mitch.a.williams@intel.com>
-M:     Linux NICS <linux.nics@intel.com>
-L:     e1000-devel@lists.sourceforge.net
+R:     Jesse Brandeburg <jesse.brandeburg@intel.com>
+R:     Shannon Nelson <shannon.nelson@intel.com>
+R:     Carolyn Wyborny <carolyn.wyborny@intel.com>
+R:     Don Skidmore <donald.c.skidmore@intel.com>
+R:     Matthew Vick <matthew.vick@intel.com>
+R:     John Ronciak <john.ronciak@intel.com>
+R:     Mitch Williams <mitch.a.williams@intel.com>
+L:     intel-wired-lan@lists.osuosl.org
 W:     http://www.intel.com/support/feedback.htm
 W:     http://e1000.sourceforge.net/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q:     http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S:     Supported
 F:     Documentation/networking/e100.txt
 F:     Documentation/networking/e1000.txt
@@ -6316,6 +6322,7 @@ F:        drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:     Amir Vadai <amirv@mellanox.com>
+M:     Ido Shamay <idos@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -6937,6 +6944,13 @@ S:       Supported
 F:     drivers/block/nvme*
 F:     include/linux/nvme.h
 
+NXP-NCI NFC DRIVER
+M:     Clément Perrochaud <clement.perrochaud@effinnov.com>
+R:     Charles Gorand <charles.gorand@effinnov.com>
+L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+S:     Supported
+F:     drivers/nfc/nxp-nci
+
 NXP TDA998X DRM DRIVER
 M:     Russell King <rmk+kernel@arm.linux.org.uk>
 S:     Supported
diff --git a/Makefile b/Makefile
index 14c722f9687766d5f672c74814c7c4298ea85110..54430f933b628ca99bdbc1e2bf5dd2570ca0354c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa25968e08302b5e61f14e152e2d16..edda76fae83f25219bd74c5487a458e29c765373 100644 (file)
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
               sigset_t *set)
 {
        int err;
-       err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+       err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
        if (!err)
                set_current_blocked(&set);
 
-       err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+       err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
 
        return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
        /* Don't restart from sigreturn */
        syscall_wont_restart(regs);
 
+       /*
+        * Ensure that sigreturn always returns to user mode (in case the
+        * regs saved on user stack got fudged between save and sigreturn)
+        * Otherwise it is easy to panic the kernel with a custom
+        * signal handler and/or restorer which clobbers the status32/ret
+        * to return to a bogus location in kernel mode.
+        */
+       regs->status32 |= STATUS_U_MASK;
+
        return regs->r0;
 
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
        /*
         * handler returns using sigreturn stub provided already by userspace
+        * If not, nuke the process right away
         */
-       BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+       if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
+               return 1;
+
        regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 
        /* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
        sigset_t *oldset = sigmask_to_save();
-       int ret;
+       int failed;
 
        /* Set up the stack frame */
-       ret = setup_rt_frame(ksig, oldset, regs);
+       failed = setup_rt_frame(ksig, oldset, regs);
 
-       signal_setup_done(ret, ksig, 0);
+       signal_setup_done(failed, ksig, 0);
 }
 
 void do_signal(struct pt_regs *regs)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9f1f09a2bc9bf28894777c08d6968d7faeba55a6..cf4c0c99aa253f3f69ecc08a07f0c5a695e63640 100644 (file)
@@ -619,6 +619,7 @@ config ARCH_PXA
        select GENERIC_CLOCKEVENTS
        select GPIO_PXA
        select HAVE_IDE
+       select IRQ_DOMAIN
        select MULTI_IRQ_HANDLER
        select PLAT_PXA
        select SPARSE_IRQ
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1b841727f58200a37ac14b03e96a18cdd2..afe678f6d2e950308b74e403d26a3cc326c2b760 100644 (file)
                >;
        };
 
+       mmc_pins: pinmux_mmc_pins {
+               pinctrl-single,pins = <
+                       DM816X_IOPAD(0x0a70, MUX_MODE0)                 /* SD_POW */
+                       DM816X_IOPAD(0x0a74, MUX_MODE0)                 /* SD_CLK */
+                       DM816X_IOPAD(0x0a78, MUX_MODE0)                 /* SD_CMD */
+                       DM816X_IOPAD(0x0a7C, MUX_MODE0)                 /* SD_DAT0 */
+                       DM816X_IOPAD(0x0a80, MUX_MODE0)                 /* SD_DAT1 */
+                       DM816X_IOPAD(0x0a84, MUX_MODE0)                 /* SD_DAT2 */
+                       DM816X_IOPAD(0x0a88, MUX_MODE0)                 /* SD_DAT3 */
+                       DM816X_IOPAD(0x0a8c, MUX_MODE2)                 /* GP1[7] */
+                       DM816X_IOPAD(0x0a90, MUX_MODE2)                 /* GP1[8] */
+               >;
+       };
+
        usb0_pins: pinmux_usb0_pins {
                pinctrl-single,pins = <
                        DM816X_IOPAD(0x0d00, MUX_MODE0)                 /* USB0_DRVVBUS */
 };
 
 &mmc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc_pins>;
        vmmc-supply = <&vmmcsd_fixed>;
+       bus-width = <4>;
+       cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
 /* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f2addc12a86639ec7b8e63e92349831236..f35715bc69922591577299730ecf1a105c6642de 100644 (file)
                };
 
                gpio1: gpio@48032000 {
-                       compatible = "ti,omap3-gpio";
+                       compatible = "ti,omap4-gpio";
                        ti,hwmods = "gpio1";
+                       ti,gpio-always-on;
                        reg = <0x48032000 0x1000>;
-                       interrupts = <97>;
+                       interrupts = <96>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                };
 
                gpio2: gpio@4804c000 {
-                       compatible = "ti,omap3-gpio";
+                       compatible = "ti,omap4-gpio";
                        ti,hwmods = "gpio2";
+                       ti,gpio-always-on;
                        reg = <0x4804c000 0x1000>;
-                       interrupts = <99>;
+                       interrupts = <98>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                };
 
                gpmc: gpmc@50000000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d79033e32e76143b0707918134f6450283..c4659a979c41387558f8ea660d119568645acbcb 100644 (file)
                                              "wkupclk", "refclk",
                                              "div-clk", "phy-div";
                                #phy-cells = <0>;
-                               ti,hwmods = "pcie1-phy";
                        };
 
                        pcie2_phy: pciephy@4a095000 {
                                              "wkupclk", "refclk",
                                              "div-clk", "phy-div";
                                #phy-cells = <0>;
-                               ti,hwmods = "pcie2-phy";
                                status = "disabled";
                        };
                };
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c40b56450160ba566cc25bcb7fac9ca489b..3fdc84fddb70d06a2ae26db02fc2c9c9f6d8ee63 100644 (file)
@@ -92,6 +92,8 @@
                        ti,hwmods = "aes";
                        reg = <0x480c5000 0x50>;
                        interrupts = <0>;
+                       dmas = <&sdma 65 &sdma 66>;
+                       dma-names = "tx", "rx";
                };
 
                prm: prm@48306000 {
                        ti,hwmods = "sham";
                        reg = <0x480c3000 0x64>;
                        interrupts = <49>;
+                       dmas = <&sdma 69>;
+                       dma-names = "rx";
                };
 
                smartreflex_core: smartreflex@480cb000 {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f687a13b50abad6f87ad6fb476e1d4377da9..eccc78d3220ba2e59e2994dfb4f5f20d3d3f7a91 100644 (file)
                        "mac_clk_rx", "mac_clk_tx",
                        "clk_mac_ref", "clk_mac_refout",
                        "aclk_mac", "pclk_mac";
+               status = "disabled";
        };
 
        usb_host0_ehci: usb@ff500000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d87609567523efd0c0e75033e8bf9e73079d6bd..d9176e6061731b7c42ee792338fab3509482c603 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <0xfff01000 0x1000>;
-                       interrupts = <0 156 4>;
+                       interrupts = <0 155 4>;
                        num-cs = <4>;
                        clocks = <&spi_m_clk>;
                        status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c43231de889f40efb79f7a064577c2d6ec..75742f8f96f3d1800d803a7fc2b2d3bd352413f5 100644 (file)
        model = "Olimex A10-OLinuXino-LIME";
        compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
 
+       cpus {
+               cpu0: cpu@0 {
+                       /*
+                        * The A10-Lime is known to be unstable
+                        * when running at 1008 MHz
+                        */
+                       operating-points = <
+                               /* kHz    uV */
+                               912000  1350000
+                               864000  1300000
+                               624000  1250000
+                               >;
+                       cooling-max-level = <2>;
+               };
+       };
+
        soc@01c00000 {
                emac: ethernet@01c0b000 {
                        pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c2925831f2038318258003ae6cd3736da71c0de..eebb7853e00bbad39916e804453929a2935cc831 100644 (file)
@@ -75,7 +75,6 @@
                        clock-latency = <244144>; /* 8 32k periods */
                        operating-points = <
                                /* kHz    uV */
-                               1056000 1500000
                                1008000 1400000
                                912000  1350000
                                864000  1300000
@@ -83,7 +82,7 @@
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
-                       cooling-max-level = <4>;
+                       cooling-max-level = <3>;
                };
        };
 
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1edbbef27f16adadc139b8e46ae49823ad..883cb4873688f2f80ce3036ca2a60f338aa4bb67 100644 (file)
@@ -47,7 +47,6 @@
                        clock-latency = <244144>; /* 8 32k periods */
                        operating-points = <
                                /* kHz    uV */
-                               1104000 1500000
                                1008000 1400000
                                912000  1350000
                                864000  1300000
@@ -57,7 +56,7 @@
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
-                       cooling-max-level = <6>;
+                       cooling-max-level = <5>;
                };
        };
 
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b79f1c46200d2b7ee8bbe343198c7106d8..fdd181792b4beeb553ab55e275361d6bd6ecab07 100644 (file)
                        clock-latency = <244144>; /* 8 32k periods */
                        operating-points = <
                                /* kHz    uV */
-                               1008000 1450000
                                960000  1400000
                                912000  1400000
                                864000  1300000
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
-                       cooling-max-level = <7>;
+                       cooling-max-level = <6>;
                };
 
                cpu@1 {
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d56e4c85ea599b295a10d09c2bf922bdc79..25f1beea453e0252234b3c62d7f53de75e6ed3f3 100644 (file)
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
                return kasprintf(GFP_KERNEL, "OMAP4");
        else if (soc_is_omap54xx())
                return kasprintf(GFP_KERNEL, "OMAP5");
+       else if (soc_is_am33xx() || soc_is_am335x())
+               return kasprintf(GFP_KERNEL, "AM33xx");
        else if (soc_is_am43xx())
                return kasprintf(GFP_KERNEL, "AM43xx");
        else if (soc_is_dra7xx())
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83c624e3d98573d542efb834e7371cffc00..89a7c06570d3adf248a00bf07c3c3aff631d23cf 100644 (file)
@@ -11,6 +11,7 @@
  *  it under the terms of the GNU General Public License version 2 as
  *  published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ           (1 << 31)
 #define ICHP_IRQ(i)            (((i) >> 16) & 0x7fff)
 #define IPR_VALID              (1 << 31)
-#define IRQ_BIT(n)             (((n) - PXA_IRQ(0)) & 0x1f)
 
 #define MAX_INTERNAL_IRQS      128
 
@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
 
 static inline void __iomem *irq_base(int i)
 {
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
        void __iomem *base = irq_data_get_irq_chip_data(d);
+       irq_hw_number_t irq = irqd_to_hwirq(d);
        uint32_t icmr = __raw_readl(base + ICMR);
 
-       icmr &= ~(1 << IRQ_BIT(d->irq));
+       icmr &= ~BIT(irq & 0x1f);
        __raw_writel(icmr, base + ICMR);
 }
 
 void pxa_unmask_irq(struct irq_data *d)
 {
        void __iomem *base = irq_data_get_irq_chip_data(d);
+       irq_hw_number_t irq = irqd_to_hwirq(d);
        uint32_t icmr = __raw_readl(base + ICMR);
 
-       icmr |= 1 << IRQ_BIT(d->irq);
+       icmr |= BIT(irq & 0x1f);
        __raw_writel(icmr, base + ICMR);
 }
 
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
        } while (1);
 }
 
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+                      irq_hw_number_t hw)
 {
-       int irq, i, n;
+       void __iomem *base = irq_base(hw / 32);
 
-       BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+       /* initialize interrupt priority */
+       if (cpu_has_ipr)
+               __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+       irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+                                handle_level_irq);
+       irq_set_chip_data(virq, base);
+       set_irq_flags(virq, IRQF_VALID);
+
+       return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+       .map    = pxa_irq_map,
+       .xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+                   int (*fn)(struct irq_data *, unsigned int))
+{
+       int n;
 
        pxa_internal_irq_nr = irq_nr;
-       cpu_has_ipr = !cpu_is_pxa25x();
-       pxa_irq_base = io_p2v(0x40d00000);
+       pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+                                              PXA_IRQ(0), 0,
+                                              &pxa_irq_ops, NULL);
+       if (!pxa_irq_domain)
+               panic("Unable to add PXA IRQ domain\n");
+       irq_set_default_host(pxa_irq_domain);
 
        for (n = 0; n < irq_nr; n += 32) {
                void __iomem *base = irq_base(n >> 5);
 
                __raw_writel(0, base + ICMR);   /* disable all IRQs */
                __raw_writel(0, base + ICLR);   /* all IRQs are IRQ, not FIQ */
-               for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-                       /* initialize interrupt priority */
-                       if (cpu_has_ipr)
-                               __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-                       irq = PXA_IRQ(i);
-                       irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-                                                handle_level_irq);
-                       irq_set_chip_data(irq, base);
-                       set_irq_flags(irq, IRQF_VALID);
-               }
        }
-
        /* only unmasked interrupts kick us out of idle */
        __raw_writel(1, irq_base(0) + ICCR);
 
        pxa_internal_irq_chip.irq_set_wake = fn;
 }
 
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+       BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+       pxa_irq_base = io_p2v(0x40d00000);
+       cpu_has_ipr = !cpu_is_pxa25x();
+       pxa_init_irq_common(NULL, irq_nr, fn);
+}
+
 #ifdef CONFIG_PM
 static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
 static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };
 
 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-                      irq_hw_number_t hw)
-{
-       void __iomem *base = irq_base(hw / 32);
-
-       /* initialize interrupt priority */
-       if (cpu_has_ipr)
-               __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-       irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-                                handle_level_irq);
-       irq_set_chip_data(hw, base);
-       set_irq_flags(hw, IRQF_VALID);
-
-       return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-       .map    = pxa_irq_map,
-       .xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
        { .compatible = "marvell,pxa-intc", },
        {}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
        struct device_node *node;
        struct resource res;
-       int n, ret;
+       int ret;
 
        node = of_find_matching_node(NULL, intc_ids);
        if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
                return;
        }
 
-       pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
-                                              &pxa_irq_ops, NULL);
-       if (!pxa_irq_domain)
-               panic("Unable to add PXA IRQ domain\n");
-
-       irq_set_default_host(pxa_irq_domain);
-
-       for (n = 0; n < pxa_internal_irq_nr; n += 32) {
-               void __iomem *base = irq_base(n >> 5);
-
-               __raw_writel(0, base + ICMR);   /* disable all IRQs */
-               __raw_writel(0, base + ICLR);   /* all IRQs are IRQ, not FIQ */
-       }
-
-       /* only unmasked interrupts kick us out of idle */
-       __raw_writel(1, irq_base(0) + ICCR);
-
-       pxa_internal_irq_chip.irq_set_wake = fn;
+       pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
 }
 #endif /* CONFIG_OF */
index 205f9bf3821e30236d579021f09d1157a6c3fee5..ac2ae5c71ab45b7428440ee8925b121533ba9509 100644 (file)
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
 };
 
 static struct platform_device can_regulator_device = {
-       .name   = "reg-fixed-volage",
+       .name   = "reg-fixed-voltage",
        .id     = 0,
        .dev    = {
                .platform_data  = &can_regulator_pdata,
index a77604fbaf257acef8e2c546dd267a5e2ef501af..81502b90dd9130240bd716d4bfb866b9d5ac5efe 100644 (file)
@@ -1,10 +1,12 @@
 menuconfig ARCH_SUNXI
        bool "Allwinner SoCs" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
+       select ARCH_HAS_RESET_CONTROLLER
        select CLKSRC_MMIO
        select GENERIC_IRQ_CHIP
        select PINCTRL
        select SUN4I_TIMER
+       select RESET_CONTROLLER
 
 if ARCH_SUNXI
 
@@ -20,10 +22,8 @@ config MACH_SUN5I
 config MACH_SUN6I
        bool "Allwinner A31 (sun6i) SoCs support"
        default ARCH_SUNXI
-       select ARCH_HAS_RESET_CONTROLLER
        select ARM_GIC
        select MFD_SUN6I_PRCM
-       select RESET_CONTROLLER
        select SUN5I_HSTIMER
 
 config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
 config MACH_SUN8I
        bool "Allwinner A23 (sun8i) SoCs support"
        default ARCH_SUNXI
-       select ARCH_HAS_RESET_CONTROLLER
        select ARM_GIC
        select MFD_SUN6I_PRCM
-       select RESET_CONTROLLER
 
 config MACH_SUN9I
        bool "Allwinner (sun9i) SoCs support"
        default ARCH_SUNXI
-       select ARCH_HAS_RESET_CONTROLLER
        select ARM_GIC
-       select RESET_CONTROLLER
 
 endif
index db10169a08de78980d0ad17b71224f96bbfb1d5c..8ca94d379bc35f2020dea0ea3e708b0fbb0bb827 100644 (file)
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        const struct of_device_id *match;
        const struct dmtimer_platform_data *pdata;
+       int ret;
 
        match = of_match_device(of_match_ptr(omap_timer_match), dev);
        pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        }
 
        if (!timer->reserved) {
-               pm_runtime_get_sync(dev);
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0) {
+                       dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+                               __func__);
+                       goto err_get_sync;
+               }
                __omap_dm_timer_init_regs(timer);
                pm_runtime_put(dev);
        }
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        dev_dbg(dev, "Device Probed.\n");
 
        return 0;
+
+err_get_sync:
+       pm_runtime_put_noidle(dev);
+       pm_runtime_disable(dev);
+       return ret;
 }
 
 /**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
                }
        spin_unlock_irqrestore(&dm_timer_lock, flags);
 
+       pm_runtime_disable(&pdev->dev);
+
        return ret;
 }
 
index ea2b5666a16f5a3e6b194559e21fc2538f58b7f0..c9b89efe0f562a9dd7cd1a60126f42cb8948ff23 100644 (file)
@@ -8,7 +8,7 @@
  */
 
        /* SoC fixed clocks */
-       soc_uartclk: refclk72738khz {
+       soc_uartclk: refclk7273800hz {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <7273800>;
index cb9593079f29763c34f7e68fa89737355ac03adb..d8c25b7b18fbf42ddc66ab888fc22c530d752d15 100644 (file)
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
        __ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-       cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-                               o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)                      \
+({                                                             \
+       typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
+       preempt_disable();                                      \
+       __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);       \
+       preempt_enable();                                       \
+       __ret;                                                  \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)          \
+({                                                                     \
+       int __ret;                                                      \
+       preempt_disable();                                              \
+       __ret = cmpxchg_double_local(   raw_cpu_ptr(&(ptr1)),           \
+                                       raw_cpu_ptr(&(ptr2)),           \
+                                       o1, o2, n1, n2);                \
+       preempt_enable();                                               \
+       __ret;                                                          \
+})
 
 #define cmpxchg64(ptr,o,n)             cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)       cmpxchg_local((ptr),(o),(n))
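
The _protect_cmpxchg_local()/this_cpu_cmpxchg_double_8() wrappers above exist because the this_cpu accessors may be called with preemption enabled: between resolving the per-CPU slot with raw_cpu_ptr() and performing the compare-and-swap the task could migrate to another CPU and race with the new owner of the old slot. A stripped-down sketch of the same bracketing follows; preempt_disable/preempt_enable, raw_cpu_ptr and cmpxchg_local here are stand-in stubs for illustration only, not the kernel primitives.

    #include <stdio.h>

    /* Stand-in stubs; the real primitives manipulate preempt_count. */
    static void preempt_disable(void) { }
    static void preempt_enable(void)  { }

    static long this_slot;                 /* stand-in for a per-CPU variable */

    static long *raw_cpu_ptr(long *p)      /* would resolve the per-CPU offset */
    {
            return p;
    }

    static long cmpxchg_local(long *p, long old, long new)
    {
            long cur = *p;

            if (cur == old)
                    *p = new;
            return cur;
    }

    /* The slot lookup and the compare-and-swap must happen on one CPU. */
    static long protected_cmpxchg(long *pcp, long old, long new)
    {
            long ret;

            preempt_disable();
            ret = cmpxchg_local(raw_cpu_ptr(pcp), old, new);
            preempt_enable();
            return ret;
    }

    int main(void)
    {
            printf("%ld\n", protected_cmpxchg(&this_slot, 0, 42)); /* prints 0  */
            printf("%ld\n", this_slot);                            /* prints 42 */
            return 0;
    }
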
index a9eee33dfa62dc031ab8262c275eba79f8609bac..101a42bde728a8b9547bca989b696d473dcd7e42 100644 (file)
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        unsigned int cpu = smp_processor_id();
 
+       /*
+        * init_mm.pgd does not contain any user mappings and it is always
+        * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+        */
+       if (next == &init_mm) {
+               cpu_set_reserved_ttbr0();
+               return;
+       }
+
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                check_and_switch_context(next, tsk);
 }
index 09da25bc596fd0bdccdf03e94b37a4c81e0cc633..4fde8c1df97ffb46d9d2039a11cf574d05ed5a92 100644 (file)
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
        return ret;
 }
 
+#define _percpu_read(pcp)                                              \
+({                                                                     \
+       typeof(pcp) __retval;                                           \
+       preempt_disable();                                              \
+       __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),      \
+                                             sizeof(pcp));             \
+       preempt_enable();                                               \
+       __retval;                                                       \
+})
+
+#define _percpu_write(pcp, val)                                                \
+do {                                                                   \
+       preempt_disable();                                              \
+       __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),       \
+                               sizeof(pcp));                           \
+       preempt_enable();                                               \
+} while(0)                                                             \
+
+#define _pcp_protect(operation, pcp, val)                      \
+({                                                             \
+       typeof(pcp) __retval;                                   \
+       preempt_disable();                                      \
+       __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),  \
+                                         (val), sizeof(pcp));  \
+       preempt_enable();                                       \
+       __retval;                                               \
+})
+
 #define _percpu_add(pcp, val) \
-       __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+       _pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-       __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+       _pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-       __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))        \
-       (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-       __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+       _pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-       (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+       _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
index 9359e504844258b12d2efd9c752859813210dd1a..d5779b0ec5730a01963a4a86940d1f5eb500084e 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_METAG_IO_H
 
 #include <linux/types.h>
+#include <asm/pgtable-bits.h>
 
 #define IO_SPACE_LIMIT  0
 
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644 (file)
index 0000000..25ba672
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT          MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE            MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV             MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE       MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT     MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1    0x020
+#define _PAGE_CACHE_CTRL0      0x040
+#define _PAGE_CACHE_CTRL1      0x080
+#define _PAGE_ALWAYS_ZERO_2    0x100
+#define _PAGE_ALWAYS_ZERO_3    0x200
+#define _PAGE_ALWAYS_ZERO_4    0x400
+#define _PAGE_ALWAYS_ZERO_5    0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used.  Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED         _PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY            _PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL           _PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0       (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1       (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2       (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3       (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE                (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK       (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+                                _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK         (PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT         1
+#define _PAGE_SZ_4K            (0x0)
+#define _PAGE_SZ_8K            (0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K           (0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K           (0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K           (0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K          (0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K          (0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K          (0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M            (0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M            (0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M            (0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK          (0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ               (_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ               (_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ               (_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE            (_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */
index d0604c0a87022231f37157531e1175894545ee4a..ffa3a3a2ecadda8bed7cf5e7b1508cd98c43abf8 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef _METAG_PGTABLE_H
 #define _METAG_PGTABLE_H
 
+#include <asm/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>
 
 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
 #define VMALLOC_END            0x7FFFFFFF
 #endif
 
-/*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT          MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE            MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV             MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE       MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT     MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1    0x020
-#define _PAGE_CACHE_CTRL0      0x040
-#define _PAGE_CACHE_CTRL1      0x080
-#define _PAGE_ALWAYS_ZERO_2    0x100
-#define _PAGE_ALWAYS_ZERO_3    0x200
-#define _PAGE_ALWAYS_ZERO_4    0x400
-#define _PAGE_ALWAYS_ZERO_5    0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used.  Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED         _PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY            _PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL           _PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0       (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1       (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2       (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3       (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE                (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK       (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
-                                _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK         (PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT         1
-#define _PAGE_SZ_4K            (0x0)
-#define _PAGE_SZ_8K            (0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K           (0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K           (0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K           (0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K          (0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K          (0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K          (0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M            (0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M            (0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M            (0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK          (0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ               (_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ               (_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ               (_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE            (_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE          (_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE          (_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE          (_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE          (_PAGE_SZ_4M)
-#endif
-
 /*
  * The Linux memory management assumes a three-level page table setup. On
  * Meta, we use that, but "fold" the mid level into the top-level page
index f213f5b4c4239b961e260c659447886a8646f0bd..d17437238a2cef75dd4d46593c760320933b6d38 100644 (file)
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
                actual_pgd += PTRS_PER_PGD;
                /* Populate first pmd with allocated memory.  We mark it
                 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
        pgd -= PTRS_PER_PGD;
 #endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-#ifdef CONFIG_64BIT
        if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-               /* This is the permanent pmd attached to the pgd;
-                * cannot free it */
+               /*
+                * This is the permanent pmd attached to the pgd;
+                * cannot free it.
+                * Increment the counter to compensate for the decrement
+                * done by generic mm code.
+                */
+               mm_inc_nr_pmds(mm);
                return;
-#endif
        free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
        /* preserve the gateway marker if this is the beginning of
         * the permanent pmd */
        if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
index 5a8997d63899346a82e759f30b601eaf907bac37..8eefb12d1d33f3fc87195a0cd9efe0d0fd0301b2 100644 (file)
@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif
 
-       ENTRY_SAME(restart_syscall)     /* 0 */
-       ENTRY_SAME(exit)
+90:    ENTRY_SAME(restart_syscall)     /* 0 */
+91:    ENTRY_SAME(exit)
        ENTRY_SAME(fork_wrapper)
        ENTRY_SAME(read)
        ENTRY_SAME(write)
        ENTRY_SAME(bpf)
        ENTRY_COMP(execveat)
 
-       /* Nothing yet */
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
 
 #undef ENTRY_SAME
 #undef ENTRY_DIFF
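
The added 90:/91: labels and the .ifne directive make the assembler fail the build whenever the table length and __NR_Linux_syscalls disagree, instead of silently truncating or overrunning the table. A rough C analogue of that build-time check, using invented names (NR_SYSCALLS and the sys_* dummies) purely for illustration, is:

    #include <stdio.h>

    #define NR_SYSCALLS 3   /* hypothetical counterpart of __NR_Linux_syscalls */

    static long sys_restart_syscall(void) { return 0; }
    static long sys_exit(void)            { return 0; }
    static long sys_fork(void)            { return 0; }

    static long (*const syscall_table[])(void) = {
            sys_restart_syscall,    /* 0 */
            sys_exit,
            sys_fork,
    };

    /* Breaks the build if the table and the count ever disagree. */
    _Static_assert(sizeof(syscall_table) / sizeof(syscall_table[0]) == NR_SYSCALLS,
                   "size of syscall table does not fit value of NR_SYSCALLS");

    int main(void)
    {
            printf("%zu syscalls\n",
                   sizeof(syscall_table) / sizeof(syscall_table[0]));
            return 0;
    }
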
index 2bf8e9307be9833b5bf9af51defdfada8feae68d..4c8ad592ae3351d96f7e225411d9f551657c5bc7 100644 (file)
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 
 static inline int cpu_nr_cores(void)
 {
-       return NR_CPUS >> threads_shift;
+       return nr_cpu_ids >> threads_shift;
 }
 
 static inline cpumask_t cpu_online_cores_map(void)
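
Bounding the result by nr_cpu_ids instead of NR_CPUS matters on kernels built with a large CONFIG_NR_CPUS: with hypothetical numbers, NR_CPUS=2048, SMT8 (threads_shift=3) and a machine that booted with nr_cpu_ids=16, the old code reported 2048 >> 3 = 256 cores where only 16 >> 3 = 2 actually exist.
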
index 2eadde0b98fb5469ff94f353c8fb9e540d3ef326..5c93f691b4955ce91a287aabf98ed45a7aafe870 100644 (file)
 #define PPC_INST_MFSPR_PVR_MASK                0xfc1fffff
 #define PPC_INST_MFTMR                 0x7c0002dc
 #define PPC_INST_MSGSND                        0x7c00019c
+#define PPC_INST_MSGCLR                        0x7c0001dc
 #define PPC_INST_MSGSNDP               0x7c00011c
 #define PPC_INST_MTTMR                 0x7c0003dc
 #define PPC_INST_NOP                   0x60000000
                                        ___PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)          stringify_in_c(.long PPC_INST_MSGSND | \
                                        ___PPC_RB(b))
+#define PPC_MSGCLR(b)          stringify_in_c(.long PPC_INST_MSGCLR | \
+                                       ___PPC_RB(b))
 #define PPC_MSGSNDP(b)         stringify_in_c(.long PPC_INST_MSGSNDP | \
                                        ___PPC_RB(b))
 #define PPC_POPCNTB(a, s)      stringify_in_c(.long PPC_INST_POPCNTB | \
index 1c874fb533bbf22fe8ff019b328ae623214243bb..af56b5c6c81ab18aefe24b16c810a3b89604434d 100644 (file)
 #define   SRR1_ISI_N_OR_G      0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT                0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK                0x00380000 /* reason for wakeup */
+#define   SRR1_WAKEMASK_P8     0x003c0000 /* reason for wakeup on POWER8 */
 #define   SRR1_WAKESYSERR      0x00300000 /* System error */
 #define   SRR1_WAKEEE          0x00200000 /* External interrupt */
 #define   SRR1_WAKEMT          0x00280000 /* mtctrl */
 #define          SRR1_WAKEHMI          0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC         0x00180000 /* Decrementer interrupt */
+#define   SRR1_WAKEDBELL       0x00140000 /* Privileged doorbell on P8 */
 #define   SRR1_WAKETHERM       0x00100000 /* Thermal management interrupt */
 #define          SRR1_WAKERESET        0x00100000 /* System reset */
+#define   SRR1_WAKEHDBELL      0x000c0000 /* Hypervisor doorbell on P8 */
 #define          SRR1_WAKESTATE        0x00030000 /* Powersave exit mask [46:47] */
 #define          SRR1_WS_DEEPEST       0x00030000 /* Some resources not maintained,
                                          * may not be recoverable */
index f337666768a76594b41b59f283efa3b891988193..f8304687833651975722e4f8aa51741fffad63d9 100644 (file)
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
+       {       /* Power8NVL */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x004c0000,
+               .cpu_name               = "POWER8NVL (raw)",
+               .cpu_features           = CPU_FTRS_POWER8,
+               .cpu_user_features      = COMMON_USER_POWER8,
+               .cpu_user_features2     = COMMON_USER2_POWER8,
+               .mmu_features           = MMU_FTRS_POWER8,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .pmc_type               = PPC_PMC_IBM,
+               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_type          = PPC_OPROFILE_INVALID,
+               .cpu_setup              = __setup_cpu_power8,
+               .cpu_restore            = __restore_cpu_power8,
+               .flush_tlb              = __flush_tlb_power8,
+               .machine_check_early    = __machine_check_early_realmode_p8,
+               .platform               = "power8",
+       },
        {       /* Power8 DD1: Does not support doorbell IPIs */
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x004d0100,
index f4217819cc31fd417243d17f079c8bf2471d37a5..2128f3a96c32dd743908f286d4cf72d6d7701568 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <asm/dbell.h>
 #include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>
 
 #ifdef CONFIG_SMP
 void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
 
        may_hard_irq_enable();
 
+       kvmppc_set_host_ipi(smp_processor_id(), 0);
        __this_cpu_inc(irq_stat.doorbell_irqs);
 
        smp_ipi_demux();
index c2df8150bd7a0425fc00ebcc78d784d6b280c146..9519e6bdc6d75c324334bf4f0f52dd6da9d9bbcc 100644 (file)
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
        bne     9f                      /* continue in V mode if we are. */
 
 5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        /*
         * We are coming from kernel context. Check if we are coming from
         * guest. if yes, then we can continue. We will fall through
index de4018a1bc4bd290ecd0e970bd51960b281497c3..de747563d29df39be580e6cd67c54769194dce42 100644 (file)
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
        spin_lock(&vcpu->arch.vpa_update_lock);
        lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
        if (lppaca)
-               yield_count = lppaca->yield_count;
+               yield_count = be32_to_cpu(lppaca->yield_count);
        spin_unlock(&vcpu->arch.vpa_update_lock);
        return yield_count;
 }
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                bool preserve_top32)
 {
+       struct kvm *kvm = vcpu->kvm;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        u64 mask;
 
+       mutex_lock(&kvm->lock);
        spin_lock(&vc->lock);
        /*
         * If ILE (interrupt little-endian) has changed, update the
         * MSR_LE bit in the intr_msr for each vcpu in this vcore.
         */
        if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
-               struct kvm *kvm = vcpu->kvm;
                struct kvm_vcpu *vcpu;
                int i;
 
-               mutex_lock(&kvm->lock);
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->arch.vcore != vc)
                                continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                        else
                                vcpu->arch.intr_msr &= ~MSR_LE;
                }
-               mutex_unlock(&kvm->lock);
        }
 
        /*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                mask &= 0xFFFFFFFF;
        vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
        spin_unlock(&vc->lock);
+       mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
index bb94e6f20c813bbcdb8297d9fb860a538c5e4f08..6cbf1630cb70c9d8b306a7628c91a3c9acf35785 100644 (file)
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        /* Save HEIR (HV emulation assist reg) in emul_inst
           if this is an HEI (HV emulation interrupt, e40) */
        li      r3,KVM_INST_FETCH_FAILED
+       stw     r3,VCPU_LAST_INST(r9)
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
        bne     11f
        mfspr   r3,SPRN_HEIR
index fc34025ef82270b00c7c32811dca20552a61d2fe..38a45088f633bb190ff53b5f7ac8306fd24acb4e 100644 (file)
@@ -33,6 +33,8 @@
 #include <asm/runlatch.h>
 #include <asm/code-patching.h>
 #include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
 
 #include "powernv.h"
 
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
 static void pnv_smp_cpu_kill_self(void)
 {
        unsigned int cpu;
-       unsigned long srr1;
+       unsigned long srr1, wmask;
        u32 idle_states;
 
        /* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
        generic_set_cpu_dead(cpu);
        smp_wmb();
 
+       wmask = SRR1_WAKEMASK;
+       if (cpu_has_feature(CPU_FTR_ARCH_207S))
+               wmask = SRR1_WAKEMASK_P8;
+
        idle_states = pnv_get_supported_cpuidle_states();
        /* We don't want to take decrementer interrupts while we are offline,
         * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
                 * having finished executing in a KVM guest, then srr1
                 * contains 0.
                 */
-               if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+               if ((srr1 & wmask) == SRR1_WAKEEE) {
                        icp_native_flush_interrupt();
                        local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
                        smp_mb();
+               } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+                       unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+                       asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+                       kvmppc_set_host_ipi(cpu, 0);
                }
 
                if (cpu_core_split_required())
index 90cf3dcbd9f268b5430a1cb69bb44cd0fd75d54a..8f35d525cede8327ac73c2129810d83456d35c15 100644 (file)
 static struct kobject *mobility_kobj;
 
 struct update_props_workarea {
-       u32 phandle;
-       u32 state;
-       u64 reserved;
-       u32 nprops;
+       __be32 phandle;
+       __be32 state;
+       __be64 reserved;
+       __be32 nprops;
 } __packed;
 
 #define NODE_ACTION_MASK       0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
        return rc;
 }
 
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
 {
        struct device_node *dn;
 
-       dn = of_find_node_by_phandle(phandle);
+       dn = of_find_node_by_phandle(be32_to_cpu(phandle));
        if (!dn)
                return -ENOENT;
 
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
        return 0;
 }
 
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
 {
        struct update_props_workarea *upwa;
        struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
        char *prop_data;
        char *rtas_buf;
        int update_properties_token;
+       u32 nprops;
        u32 vd;
 
        update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
        if (!rtas_buf)
                return -ENOMEM;
 
-       dn = of_find_node_by_phandle(phandle);
+       dn = of_find_node_by_phandle(be32_to_cpu(phandle));
        if (!dn) {
                kfree(rtas_buf);
                return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
                        break;
 
                prop_data = rtas_buf + sizeof(*upwa);
+               nprops = be32_to_cpu(upwa->nprops);
 
                /* On the first call to ibm,update-properties for a node the
                 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
                 */
                if (*prop_data == 0) {
                        prop_data++;
-                       vd = *(u32 *)prop_data;
+                       vd = be32_to_cpu(*(__be32 *)prop_data);
                        prop_data += vd + sizeof(vd);
-                       upwa->nprops--;
+                       nprops--;
                }
 
-               for (i = 0; i < upwa->nprops; i++) {
+               for (i = 0; i < nprops; i++) {
                        char *prop_name;
 
                        prop_name = prop_data;
                        prop_data += strlen(prop_name) + 1;
-                       vd = *(u32 *)prop_data;
+                       vd = be32_to_cpu(*(__be32 *)prop_data);
                        prop_data += sizeof(vd);
 
                        switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
        return 0;
 }
 
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
 {
        struct device_node *dn;
        struct device_node *parent_dn;
        int rc;
 
-       parent_dn = of_find_node_by_phandle(parent_phandle);
+       parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
        if (!parent_dn)
                return -ENOENT;
 
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
 int pseries_devicetree_update(s32 scope)
 {
        char *rtas_buf;
-       u32 *data;
+       __be32 *data;
        int update_nodes_token;
        int rc;
 
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
                if (rc && rc != 1)
                        break;
 
-               data = (u32 *)rtas_buf + 4;
-               while (*data & NODE_ACTION_MASK) {
+               data = (__be32 *)rtas_buf + 4;
+               while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
                        int i;
-                       u32 action = *data & NODE_ACTION_MASK;
-                       int node_count = *data & NODE_COUNT_MASK;
+                       u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+                       u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
 
                        data++;
 
                        for (i = 0; i < node_count; i++) {
-                               u32 phandle = *data++;
-                               u32 drc_index;
+                               __be32 phandle = *data++;
+                               __be32 drc_index;
 
                                switch (action) {
                                case DELETE_DT_NODE:
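
The conversions above treat the RTAS work area as big-endian wire data and turn every field into host order with be32_to_cpu() before using it. A minimal userspace sketch of the same parsing, with a hand-rolled byte-swapping helper standing in for the kernel's be32_to_cpu() and a made-up buffer, is:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's be32_to_cpu(). */
    static uint32_t be32_to_host(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
            /* Hypothetical work area: phandle 0x1234, state 1, nprops 2. */
            const uint8_t buf[] = {
                    0x00, 0x00, 0x12, 0x34,   /* phandle  */
                    0x00, 0x00, 0x00, 0x01,   /* state    */
                    0x00, 0x00, 0x00, 0x00,   /* reserved */
                    0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x02,   /* nprops   */
            };

            printf("phandle=0x%x state=%u nprops=%u\n",
                   be32_to_host(&buf[0]), be32_to_host(&buf[4]),
                   be32_to_host(&buf[16]));
            return 0;
    }
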
index c9df40b5c0ac1915eac7d6a794b25f7f9d6173b5..c9c875d9ed318cc75f7123a8d999b464201a4fd5 100644 (file)
@@ -211,7 +211,7 @@ do {                                                                \
 
 extern unsigned long mmap_rnd_mask;
 
-#define STACK_RND_MASK (mmap_rnd_mask)
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
 
 #define ARCH_DLINFO                                                        \
 do {                                                                       \
index 82c19899574f83070158c09c5eed5b438bc092c1..6c79f1b44fe7f7aafd8b5379833fb6ae42cf6f72 100644 (file)
 
 unsigned long ftrace_plt;
 
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+       /* brcl 0,0 */
+       insn->opc = 0xc004;
+       insn->disp = 0;
+#else
+       /* stg r14,8(r15) */
+       insn->opc = 0xe3e0;
+       insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+       if (insn->opc == BREAKPOINT_INSTRUCTION)
+               return 1;
+#endif
+       return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+       insn->opc = BREAKPOINT_INSTRUCTION;
+       insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+       insn->opc = BREAKPOINT_INSTRUCTION;
+       insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
 {
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                return -EFAULT;
        if (addr == MCOUNT_ADDR) {
                /* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
-               /* We expect to see brcl 0,0 */
-               ftrace_generate_nop_insn(&orig);
-#else
-               /* We expect to see stg r14,8(r15) */
-               orig.opc = 0xe3e0;
-               orig.disp = 0xf0080024;
-#endif
+               ftrace_generate_orig_insn(&orig);
                ftrace_generate_nop_insn(&new);
-       } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+       } else if (is_kprobe_on_ftrace(&old)) {
                /*
                 * If we find a breakpoint instruction, a kprobe has been
                 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                 * bytes of the original instruction so that the kprobes
                 * handler can execute a nop, if it reaches this breakpoint.
                 */
-               new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-               orig.disp = KPROBE_ON_FTRACE_CALL;
-               new.disp = KPROBE_ON_FTRACE_NOP;
+               ftrace_generate_kprobe_call_insn(&orig);
+               ftrace_generate_kprobe_nop_insn(&new);
        } else {
                /* Replace ftrace call with a nop. */
                ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
-       if (old.opc == BREAKPOINT_INSTRUCTION) {
+       if (is_kprobe_on_ftrace(&old)) {
                /*
                 * If we find a breakpoint instruction, a kprobe has been
                 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * bytes of the original instruction so that the kprobes
                 * handler can execute a brasl if it reaches this breakpoint.
                 */
-               new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-               orig.disp = KPROBE_ON_FTRACE_NOP;
-               new.disp = KPROBE_ON_FTRACE_CALL;
+               ftrace_generate_kprobe_nop_insn(&orig);
+               ftrace_generate_kprobe_call_insn(&new);
        } else {
                /* Replace nop with an ftrace call. */
                ftrace_generate_nop_insn(&orig);
index c3f8d157cb0d11898bbd72d103dd7b4aceb4b654..e6a1578fc00095929db02b51d4894fe9ad59802c 100644 (file)
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
 static struct attribute *cpumsf_pmu_events_attr[] = {
        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-       CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+       NULL,
        NULL,
 };
 
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
                return -EINVAL;
        }
 
-       if (si.ad)
+       if (si.ad) {
                sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+               cpumsf_pmu_events_attr[1] =
+                       CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+       }
 
        sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
        if (!sfdbg)
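
The hunk above leaves a NULL placeholder in the NULL-terminated attribute array and fills it in during init_cpum_sampling_pmu() only when diagnostic sampling (si.ad) is present, so the event is not advertised on machines that cannot deliver it. A small standalone sketch of that pattern, with invented event names, is:

    #include <stdio.h>
    #include <stdbool.h>

    static const char *diag_event = "SF_CYCLES_BASIC_DIAG";

    /* One fixed entry, one optional slot, then the NULL terminator. */
    static const char *events[] = {
            "SF_CYCLES_BASIC",
            NULL,                   /* filled in only if the facility exists */
            NULL,
    };

    static void register_events(bool have_diag)
    {
            if (have_diag)
                    events[1] = diag_event;

            for (const char **e = events; *e; e++)
                    printf("advertising %s\n", *e);
    }

    int main(void)
    {
            register_events(false); /* only the basic event is listed */
            register_events(true);  /* the diagnostic event appears as well */
            return 0;
    }
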
index 6b09fdffbd2f7e7a787ca5100f074be15ff71f33..ca6294645dd37eeec22c4ab3d26226da37686cd0 100644 (file)
@@ -177,6 +177,17 @@ restart_entry:
        lhi     %r1,1
        sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
        sam64
+#ifdef CONFIG_SMP
+       larl    %r1,smp_cpu_mt_shift
+       icm     %r1,15,0(%r1)
+       jz      smt_done
+       llgfr   %r1,%r1
+smt_loop:
+       sigp    %r1,%r0,SIGP_SET_MULTI_THREADING
+       brc     8,smt_done                      /* accepted */
+       brc     2,smt_loop                      /* busy, try again */
+smt_done:
+#endif
        larl    %r1,.Lnew_pgm_check_psw
        lpswe   0(%r1)
 pgm_check_entry:
index 498b6d967138b1fff29659e81813c77e70ff1f58..258990688a5e999557de7f3b5398d7eccdc45ebb 100644 (file)
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-       INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
        EVENT_CONSTRAINT_END
 };
 
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
        if (c)
                return c;
 
-       c = intel_pebs_constraints(event);
+       c = intel_shared_regs_constraints(cpuc, event);
        if (c)
                return c;
 
-       c = intel_shared_regs_constraints(cpuc, event);
+       c = intel_pebs_constraints(event);
        if (c)
                return c;
 
index 1d74d161687c9f2a71f334b5530067356310af18..f0095a76c18211813d711bfa52b82c916190f42d 100644 (file)
@@ -364,12 +364,21 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-       testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)

-       jnz int_ret_from_sys_call_fixup /* Go to the slow path */
-
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
+
+       /*
+        * We must check ti flags with interrupts (or at least preemption)
+        * off because we must *never* return to userspace without
+        * processing exit work that is enqueued if we're preempted here.
+        * In particular, returning to userspace with any of the one-shot
+        * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+        * very bad.
+        */
+       testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       jnz int_ret_from_sys_call_fixup /* Go to the slow path */
+
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
 
 int_ret_from_sys_call_fixup:
        FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-       jmp int_ret_from_sys_call
+       jmp int_ret_from_sys_call_irqs_off
 
        /* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
 GLOBAL(int_with_check)
@@ -789,7 +799,21 @@ retint_swapgs:             /* return to user-space */
        cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp)      /* R11 == RFLAGS */
        jne opportunistic_sysret_failed
 
-       testq $X86_EFLAGS_RF,%r11               /* sysret can't restore RF */
+       /*
+        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
+        * restoring TF results in a trap from userspace immediately after
+        * SYSRET.  This would cause an infinite loop whenever #DB happens
+        * with register state that satisfies the opportunistic SYSRET
+        * conditions.  For example, single-stepping this user code:
+        *
+        *           movq $stuck_here,%rcx
+        *           pushfq
+        *           popq %r11
+        *   stuck_here:
+        *
+        * would never get past 'stuck_here'.
+        */
+       testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
        jnz opportunistic_sysret_failed
 
        /* nothing to check for RSP */
index 7ec1d5f8d28339bce0b74191a1d458c6c8e5d5df..25ecd56cefa8f22496153cf29b763c266bd8d91e 100644 (file)
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
        { "bx", 8, offsetof(struct pt_regs, bx) },
        { "cx", 8, offsetof(struct pt_regs, cx) },
        { "dx", 8, offsetof(struct pt_regs, dx) },
-       { "si", 8, offsetof(struct pt_regs, dx) },
+       { "si", 8, offsetof(struct pt_regs, si) },
        { "di", 8, offsetof(struct pt_regs, di) },
        { "bp", 8, offsetof(struct pt_regs, bp) },
        { "sp", 8, offsetof(struct pt_regs, sp) },
index bae6c609888e7fdff25784d5bd96fd8dcd5ea88a..86db4bcd7ce52bcb74a5bf42efcd8e7152488cf1 100644 (file)
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
 
+       /* ASRock */
+       {       /* Handle problems with rebooting on ASRock Q1900DC-ITX */
+               .callback = set_pci_reboot,
+               .ident = "ASRock Q1900DC-ITX",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+               },
+       },
+
        /* ASUS */
        {       /* Handle problems with rebooting on ASUS P4S800 */
                .callback = set_bios_reboot,
index b1947e0f3e100d7552add9688125d9c86d29a8ab..46d4449772bc714daa658ea6424fb45659095c70 100644 (file)
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                        struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
        int i;
+       struct kvm_lapic *apic = vcpu->arch.apic;
 
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
                spin_lock(&ioapic->lock);
 
-               if (trigger_mode != IOAPIC_LEVEL_TRIG)
+               if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+                   kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                        continue;
 
                ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
index bd4e34de24c7a0860de0adf88d3b85fd94f8ddae..4ee827d7bf36f730c25d358f709aa99cda93260a 100644 (file)
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-       if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-           kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+       if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
                int trigger_mode;
                if (apic_test_vector(vector, apic->regs + APIC_TMR))
                        trigger_mode = IOAPIC_LEVEL_TRIG;
index 10a481b7674de285a45de0a1675ee60a9318e19d..ae4f6d35d19c268315745741150dd6d1a7df5222 100644 (file)
@@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        if (enable_ept) {
                /* nested EPT: emulate EPT also to L1 */
                vmx->nested.nested_vmx_secondary_ctls_high |=
-                       SECONDARY_EXEC_ENABLE_EPT |
-                       SECONDARY_EXEC_UNRESTRICTED_GUEST;
+                       SECONDARY_EXEC_ENABLE_EPT;
                vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
                         VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
                         VMX_EPT_INVEPT_BIT;
@@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        } else
                vmx->nested.nested_vmx_ept_caps = 0;
 
+       if (enable_unrestricted_guest)
+               vmx->nested.nested_vmx_secondary_ctls_high |=
+                       SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
        /* miscellaneous data */
        rdmsr(MSR_IA32_VMX_MISC,
                vmx->nested.nested_vmx_misc_low,
index 9f93af56a5fc7bd4cf263406faf96f8a5fa56466..b47124d4cd67e29199fae1c48f9a94ecd93b00f7 100644 (file)
@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
 static DEFINE_SPINLOCK(p2m_update_lock);
 
 static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 void __init xen_vmalloc_p2m_tree(void)
 {
        static struct vm_struct vm;
+       unsigned long p2m_limit;
 
+       p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
-       vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+       vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
                        PMD_SIZE * PMDS_PER_MID_PAGE);
        vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
        pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
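
Sizing the area by max(xen_max_p2m_pfn, p2m_limit) reserves room for memory that may be hotplugged later, not just for what was present at boot. As a worked example with a hypothetical CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT of 512 (GiB) and 4 KiB pages, p2m_limit = 512 * 1024^3 / 4096 = 134,217,728 entries; at 8 bytes per unsigned long on 64-bit that is roughly 1 GiB of virtual address space for the p2m array.
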
index fc1ff3b1ea1f4a9ea2dcd18d6e49eea7281eb9b3..fd3fee81c23ce2f1cdc73d2bf4e76188c90358cb 100644 (file)
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
                struct bio_vec *bprev;
 
-               bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+               bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
                if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
                        return false;
        }
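
The one-line fix matters because the previous bvec has to come from the request's own tail bio: for example, if rq->biotail has 3 bvecs and the incoming bio has 1, the old expression read bi_io_vec[0] of the tail bio instead of bi_io_vec[2], so the gap check compared against the wrong segment.
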
index d53a764b05eacde776a4454054795d1c0277d676..be3290cc0644efc9e3feec6fd891c7afe1a15775 100644 (file)
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
-                * some to complete.
+                * some to complete. Note that hctx can be NULL here for
+                * reserved tag allocation.
                 */
-               blk_mq_run_hw_queue(hctx, false);
+               if (hctx)
+                       blk_mq_run_hw_queue(hctx, false);
 
                /*
                 * Retry tag allocation after running the hardware queue,
index 4f4bea21052e41068112ead8cbb4e0a42cb7a9d6..b7b8933ec24188b2807229af1aa844503b46dc89 100644 (file)
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         */
        if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_map;
+               goto err_mq_usage;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
        if (blk_mq_init_hw_queues(q, set))
-               goto err_hw;
+               goto err_mq_usage;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        return q;
 
-err_hw:
+err_mq_usage:
        blk_cleanup_queue(q);
 err_hctxs:
        kfree(map);
index 6ed2cbe5e8c9ae340233b0491255dfed40947590..12600bfffca93f4547e2325eeda9669ff443a7a7 100644 (file)
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                     b->physical_block_size);
 
        t->io_min = max(t->io_min, b->io_min);
-       t->io_opt = lcm(t->io_opt, b->io_opt);
+       t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                    b->raid_partial_stripes_expensive);
 
        /* Find lowest common alignment_offset */
-       t->alignment_offset = lcm(t->alignment_offset, alignment)
+       t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);
 
        /* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+               t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
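
The switch to lcm_not_zero() matters because a zero in either stacked limit means "not set"; with plain lcm() that zero would wipe out the other device's value. A rough sketch of the helper's semantics (an approximation written for illustration; the real helper lives in lib/lcm.c):

#include <linux/lcm.h>

/* Approximation of lcm_not_zero(): behave like lcm() when both values are
 * set, otherwise keep whichever one is non-zero (or 0 if both are unset). */
static unsigned long lcm_not_zero_sketch(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);

	return l ? l : (b ? b : a);
}
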
 
index 26089d182cb70086f8278d05f15f07bafa4e3ab1..f22cc56fd1b383f7ef37472808070e8e31120a11 100644 (file)
@@ -381,7 +381,7 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
        sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
        sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
 }
-EXPORT_SYMBOL(af_alg_link_sg);
+EXPORT_SYMBOL_GPL(af_alg_link_sg);
 
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
index 60496d405ebf4a6cf0e4cf12d36ab3294617b1f3..0aa02635ceda67a0591c6eb2a25a1d789a8a2012 100644 (file)
@@ -509,11 +509,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
        struct skcipher_async_req *sreq;
        struct ablkcipher_request *req;
        struct skcipher_async_rsgl *last_rsgl = NULL;
-       unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+       unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
        unsigned int reqlen = sizeof(struct skcipher_async_req) +
                                GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
-       int i = 0;
        int err = -ENOMEM;
+       bool mark = false;
 
        lock_sock(sk);
        req = kmalloc(reqlen, GFP_KERNEL);
@@ -555,7 +555,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                             iov_iter_count(&msg->msg_iter));
                used = min_t(unsigned long, used, sg->length);
 
-               if (i == tx_nents) {
+               if (txbufs == tx_nents) {
                        struct scatterlist *tmp;
                        int x;
                        /* Ran out of tx slots in async request
@@ -573,17 +573,18 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                        kfree(sreq->tsg);
                        sreq->tsg = tmp;
                        tx_nents *= 2;
+                       mark = true;
                }
                /* Need to take over the tx sgl from ctx
                 * to the asynch req - these sgls will be freed later */
-               sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length,
+               sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
                            sg->offset);
 
                if (list_empty(&sreq->list)) {
                        rsgl = &sreq->first_sgl;
                        list_add_tail(&rsgl->list, &sreq->list);
                } else {
-                       rsgl = kzalloc(sizeof(*rsgl), GFP_KERNEL);
+                       rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
                        if (!rsgl) {
                                err = -ENOMEM;
                                goto free;
@@ -604,6 +605,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                iov_iter_advance(&msg->msg_iter, used);
        }
 
+       if (mark)
+               sg_mark_end(sreq->tsg + txbufs - 1);
+
        ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
                                     len, sreq->iv);
        err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
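
Two things happen in this hunk: the tx scatterlist array is doubled when it fills up, and once all entries have been taken over from ctx the last slot actually used is terminated with sg_mark_end(), so the cipher does not walk past the filled region. A simplified sketch of the grow-and-terminate pattern (not the algif code itself; the helper name is hypothetical):

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative sketch: double a scatterlist array, carry over the entries
 * already in use, and leave it to the caller to sg_mark_end() the last
 * filled slot so walkers stop where the data stops. */
static struct scatterlist *sg_grow(struct scatterlist *old,
				   unsigned int used, unsigned int *nents)
{
	struct scatterlist *new;
	unsigned int i;

	new = kcalloc(*nents * 2, sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	sg_init_table(new, *nents * 2);
	for (i = 0; i < used; i++)
		sg_set_page(&new[i], sg_page(&old[i]),
			    old[i].length, old[i].offset);
	kfree(old);
	*nents *= 2;

	return new;
}
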
index 4c35f0822d06e54dd5399a285d22cbcfe174b115..23dac3babfe3afc710db73a2ad4f8fe05174b7d7 100644 (file)
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* devices that don't properly handle queued TRIM commands */
-       { "Micron_M[56]*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*M500*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Micron_M5[15]0*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*M550*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung SSD 850 PRO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
-       { "Crucial_CT*SSD*",            NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         */
        { "INTEL*SSDSC2MH*",            NULL,   0, },
 
+       { "Micron*",                    NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial*",                   NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "INTEL*SSD*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "SSD*INTEL*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
                return NULL;
 
        /* libsas case */
-       if (!ap->scsi_host) {
+       if (ap->flags & ATA_FLAG_SAS_HOST) {
                tag = ata_sas_allocate_tag(ap);
                if (tag < 0)
                        return NULL;
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
        tag = qc->tag;
        if (likely(ata_tag_valid(tag))) {
                qc->tag = ATA_TAG_POISON;
-               if (!ap->scsi_host)
+               if (ap->flags & ATA_FLAG_SAS_HOST)
                        ata_sas_free_tag(tag, ap);
        }
 }
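
The strings in the first two columns of the blacklist are shell-style globs; an entry such as { "Micron_M5[15]0*", "MU01", ... } is meant to match the reported model string and, when the second field is non-NULL, the firmware revision as well. A hedged sketch of that interpretation, assuming the matching behaves like glob_match() from lib/glob.c (the helper name below is hypothetical):

#include <linux/glob.h>

/* Hedged sketch of how a blacklist entry is read: the first pattern is
 * matched against the device model, the second (when present) against
 * the firmware revision string. */
static bool blacklist_entry_matches(const char *model_pat, const char *fw_pat,
				    const char *model, const char *fw_rev)
{
	if (!glob_match(model_pat, model))
		return false;

	return !fw_pat || glob_match(fw_pat, fw_rev);
}
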
index beb8b27d4621a6d9f839065c1296fa8ab67f3032..a13587b5c2be32b912f9fe0fd7544761a280bd57 100644 (file)
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+       if (map->dev)
+               return dev_name(map->dev);
+
+       return map->name;
+}
+
 #endif
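
regmap_name() exists because a regmap is not guaranteed to have a struct device behind it; falling back to map->name keeps diagnostics labelled either way, which is what the trace-point changes below rely on once they pass the map instead of map->dev. A small illustrative use (hypothetical function, not part of the patch):

#include <linux/printk.h>

#include "internal.h"

/* Illustrative only: label debug output for a map that may or may not
 * have a struct device attached. */
static void example_log_write(struct regmap *map, unsigned int reg,
			      unsigned int val)
{
	pr_debug("%s: %#x <= %#x\n", regmap_name(map), reg, val);
}
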
index da84f544c5443da0cf3928dd390ec81dcd77f669..87db9893b463ba505acc3c28e253b5a08cf20c90 100644 (file)
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
                ret = map->cache_ops->read(map, reg, value);
 
                if (ret == 0)
-                       trace_regmap_reg_read_cache(map->dev, reg, *value);
+                       trace_regmap_reg_read_cache(map, reg, *value);
 
                return ret;
        }
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
        dev_dbg(map->dev, "Syncing %s cache\n",
                map->cache_ops->name);
        name = map->cache_ops->name;
-       trace_regcache_sync(map->dev, name, "start");
+       trace_regcache_sync(map, name, "start");
 
        if (!map->cache_dirty)
                goto out;
@@ -346,7 +346,7 @@ out:
 
        regmap_async_complete(map);
 
-       trace_regcache_sync(map->dev, name, "stop");
+       trace_regcache_sync(map, name, "stop");
 
        return ret;
 }
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
        name = map->cache_ops->name;
        dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-       trace_regcache_sync(map->dev, name, "start region");
+       trace_regcache_sync(map, name, "start region");
 
        if (!map->cache_dirty)
                goto out;
@@ -401,7 +401,7 @@ out:
 
        regmap_async_complete(map);
 
-       trace_regcache_sync(map->dev, name, "stop region");
+       trace_regcache_sync(map, name, "stop region");
 
        return ret;
 }
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
        map->lock(map->lock_arg);
 
-       trace_regcache_drop_region(map->dev, min, max);
+       trace_regcache_drop_region(map, min, max);
 
        ret = map->cache_ops->drop(map, min, max);
 
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
        map->lock(map->lock_arg);
        WARN_ON(map->cache_bypass && enable);
        map->cache_only = enable;
-       trace_regmap_cache_only(map->dev, enable);
+       trace_regmap_cache_only(map, enable);
        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
        map->lock(map->lock_arg);
        WARN_ON(map->cache_only && enable);
        map->cache_bypass = enable;
-       trace_regmap_cache_bypass(map->dev, enable);
+       trace_regmap_cache_bypass(map, enable);
        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
index f99b098ddabfbd23dae3ab3ee7733b9ff24e28ef..dbfe6a69c3daa67969df7c670f9e5144a053afb6 100644 (file)
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
        if (map->async && map->bus->async_write) {
                struct regmap_async *async;
 
-               trace_regmap_async_write_start(map->dev, reg, val_len);
+               trace_regmap_async_write_start(map, reg, val_len);
 
                spin_lock_irqsave(&map->async_lock, flags);
                async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                return ret;
        }
 
-       trace_regmap_hw_write_start(map->dev, reg,
-                                   val_len / map->format.val_bytes);
+       trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
        /* If we're doing a single register write we can probably just
         * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                kfree(buf);
        }
 
-       trace_regmap_hw_write_done(map->dev, reg,
-                                  val_len / map->format.val_bytes);
+       trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
        return ret;
 }
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
        map->format.format_write(map, reg, val);
 
-       trace_regmap_hw_write_start(map->dev, reg, 1);
+       trace_regmap_hw_write_start(map, reg, 1);
 
        ret = map->bus->write(map->bus_context, map->work_buf,
                              map->format.buf_size);
 
-       trace_regmap_hw_write_done(map->dev, reg, 1);
+       trace_regmap_hw_write_done(map, reg, 1);
 
        return ret;
 }
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
                dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-       trace_regmap_reg_write(map->dev, reg, val);
+       trace_regmap_reg_write(map, reg, val);
 
        return map->reg_write(context, reg, val);
 }
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
                int val = regs[i].def;
-               trace_regmap_hw_write_start(map->dev, reg, 1);
+               trace_regmap_hw_write_start(map, reg, 1);
                map->format.format_reg(u8, reg, map->reg_shift);
                u8 += reg_bytes + pad_bytes;
                map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
-               trace_regmap_hw_write_done(map->dev, reg, 1);
+               trace_regmap_hw_write_done(map, reg, 1);
        }
        return ret;
 }
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         */
        u8[0] |= map->read_flag_mask;
 
-       trace_regmap_hw_read_start(map->dev, reg,
-                                  val_len / map->format.val_bytes);
+       trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
        ret = map->bus->read(map->bus_context, map->work_buf,
                             map->format.reg_bytes + map->format.pad_bytes,
                             val, val_len);
 
-       trace_regmap_hw_read_done(map->dev, reg,
-                                 val_len / map->format.val_bytes);
+       trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
        return ret;
 }
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
                        dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-               trace_regmap_reg_read(map->dev, reg, *val);
+               trace_regmap_reg_read(map, reg, *val);
 
                if (!map->cache_bypass)
                        regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
        struct regmap *map = async->map;
        bool wake;
 
-       trace_regmap_async_io_complete(map->dev);
+       trace_regmap_async_io_complete(map);
 
        spin_lock(&map->async_lock);
        list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
        if (!map->bus || !map->bus->async_write)
                return 0;
 
-       trace_regmap_async_complete_start(map->dev);
+       trace_regmap_async_complete_start(map);
 
        wait_event(map->async_waitq, regmap_async_is_done(map));
 
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
        map->async_ret = 0;
        spin_unlock_irqrestore(&map->async_lock, flags);
 
-       trace_regmap_async_complete_done(map->dev);
+       trace_regmap_async_complete_done(map);
 
        return ret;
 }
index 9be17d3431bb000a4a18cb515fbb7fe9f7eda0d6..fc6ffcfa80618efded04b72b1501ca505683a3b0 100644 (file)
@@ -1,6 +1,6 @@
 config BCMA_POSSIBLE
        bool
-       depends on HAS_IOMEM && HAS_DMA && PCI
+       depends on HAS_IOMEM && HAS_DMA
        default y
 
 menu "Broadcom specific AMBA"
@@ -45,9 +45,9 @@ config BCMA_HOST_SOC
 
          If unsure, say N
 
-# TODO: make it depend on PCI when ready
 config BCMA_DRIVER_PCI
-       bool
+       bool "BCMA Broadcom PCI core driver"
+       depends on BCMA && PCI
        default y
        help
          BCMA bus may have many versions of PCIe core. This driver
index 5a1d22489afc78d3a9a059655c6d661bcaf139b1..15f2b2e242ea76b9ed882383f51a976a72d7b325 100644 (file)
@@ -106,15 +106,35 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
 #endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */
 
 /* driver_pci.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
 u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
 void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
 void bcma_core_pci_init(struct bcma_drv_pci *pc);
 void bcma_core_pci_up(struct bcma_drv_pci *pc);
 void bcma_core_pci_down(struct bcma_drv_pci *pc);
+#else
+static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+       WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+static inline void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+       /* Initialization is required for PCI hosted bus */
+       WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
 
 /* driver_pcie2.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
 void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
 void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
+#else
+static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
+{
+       /* Initialization is required for PCI hosted bus */
+       WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
 
 extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
 
index dce34fb52e27facf1136315e4beb729afa79d504..74ccb02e0f10c8c52a1db7ab3948f5d697b3cddc 100644 (file)
@@ -17,6 +17,8 @@
 
 #include "bcma_private.h"
 
+#define BCMA_GPIO_MAX_PINS     32
+
 static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
 {
        return container_of(chip, struct bcma_drv_cc, gpio);
@@ -204,6 +206,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
 
 int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
        struct gpio_chip *chip = &cc->gpio;
        int err;
 
@@ -222,7 +225,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
                chip->of_node   = cc->core->dev.of_node;
 #endif
-       switch (cc->core->bus->chipinfo.id) {
+       switch (bus->chipinfo.id) {
        case BCMA_CHIP_ID_BCM5357:
        case BCMA_CHIP_ID_BCM53572:
                chip->ngpio     = 32;
@@ -231,13 +234,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
                chip->ngpio     = 16;
        }
 
-       /* There is just one SoC in one device and its GPIO addresses should be
-        * deterministic to address them more easily. The other buses could get
-        * a random base number. */
-       if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-               chip->base              = 0;
-       else
-               chip->base              = -1;
+       /*
+        * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
+        * pin numbers. We don't have Device Tree there and we can't really use
+        * relative (per chip) numbers.
+        * So let's use a predictable base for BCM47XX and a "random" one otherwise.
+        */
+#if IS_BUILTIN(CONFIG_BCM47XX)
+       chip->base              = bus->num * BCMA_GPIO_MAX_PINS;
+#else
+       chip->base              = -1;
+#endif
 
        err = bcma_gpio_irq_domain_init(cc);
        if (err)
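
With the fixed per-bus window, a GPIO consumer on BCM47XX can refer to a pin by an absolute number that never changes across boots. A tiny illustration of the numbering (hypothetical helper, not part of the patch):

#define BCMA_GPIO_MAX_PINS	32

/* Illustrative: pin 5 on bus 1 is always absolute GPIO 1 * 32 + 5 = 37,
 * whereas a base of -1 lets gpiolib pick a dynamic range. */
static inline unsigned int bcma_abs_gpio(unsigned int bus_num,
					 unsigned int pin)
{
	return bus_num * BCMA_GPIO_MAX_PINS + pin;
}
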
index cfd35bc1c5a35752a1ac30f751cce6dcd544a864..f499a469e66d08d7dfcb50e123c82fc920f32ea9 100644 (file)
@@ -282,39 +282,6 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
 
-int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
-                         bool enable)
-{
-       struct pci_dev *pdev;
-       u32 coremask, tmp;
-       int err = 0;
-
-       if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
-               /* This bcma device is not on a PCI host-bus. So the IRQs are
-                * not routed through the PCI core.
-                * So we must not enable routing through the PCI core. */
-               goto out;
-       }
-
-       pdev = bus->host_pci;
-
-       err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
-       if (err)
-               goto out;
-
-       coremask = BIT(core->core_index) << 8;
-       if (enable)
-               tmp |= coremask;
-       else
-               tmp &= ~coremask;
-
-       err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
-
-out:
-       return err;
-}
-EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
-
 static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 {
        u32 w;
index a62a2f9091f529d8a9c7ccfdb607446accc6f676..0856189c065fd57826df7edab2dba3217d5e283c 100644 (file)
@@ -351,3 +351,37 @@ void bcma_host_pci_down(struct bcma_bus *bus)
                bcma_core_pci_down(&bus->drv_pci[0]);
 }
 EXPORT_SYMBOL_GPL(bcma_host_pci_down);
+
+/* See also si_pci_setup */
+int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
+                         bool enable)
+{
+       struct pci_dev *pdev;
+       u32 coremask, tmp;
+       int err = 0;
+
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
+               /* This bcma device is not on a PCI host-bus. So the IRQs are
+                * not routed through the PCI core.
+                * So we must not enable routing through the PCI core. */
+               goto out;
+       }
+
+       pdev = bus->host_pci;
+
+       err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+       if (err)
+               goto out;
+
+       coremask = BIT(core->core_index) << 8;
+       if (enable)
+               tmp |= coremask;
+       else
+               tmp &= ~coremask;
+
+       err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+       return err;
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
index 4bc2a5cb9935fbc6f5256102bdf9c2c24b9e6d8c..a98c41f72c63f4ceac0cef519f12a7b1b5be7702 100644 (file)
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
                return -EINVAL;
        }
 
-       nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-       if (!nbd_dev)
-               return -ENOMEM;
-
        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
 
+       nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+       if (!nbd_dev)
+               return -ENOMEM;
+
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
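
Moving the kcalloc() below the parameter checks means an invalid max_part or nbds_max no longer leaks the freshly allocated array through the early -EINVAL returns. A generic sketch of the validate-then-allocate ordering the hunk establishes (a hypothetical module written for illustration):

#include <linux/module.h>
#include <linux/slab.h>

static int nr_items = 16;	/* hypothetical tunable */
static int *items;

/* Reject bad parameters before allocating, so no early error return
 * can leak the array. */
static int __init example_init(void)
{
	if (nr_items < 0 || nr_items > 1024)
		return -EINVAL;

	items = kcalloc(nr_items, sizeof(*items), GFP_KERNEL);
	if (!items)
		return -ENOMEM;

	return 0;
}

static void __exit example_exit(void)
{
	kfree(items);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
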
index ceb32dd52a6ca5a777e97541646866985251a9f0..e23be20a341752c1b175cc05612c7e087ee24fa4 100644 (file)
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        get_device(dev->device);
 
+       INIT_LIST_HEAD(&dev->node);
        INIT_WORK(&dev->probe_work, nvme_async_probe);
        schedule_work(&dev->probe_work);
        return 0;
index 364f080768d02b2a13ea621da81f880685c39579..ed5c2738bea20efcf0c6ed0a920ad9faf5b1fd67 100644 (file)
@@ -2,9 +2,17 @@
 menu "Bluetooth device drivers"
        depends on BT
 
+config BT_INTEL
+       tristate
+
+config BT_BCM
+       tristate
+       select FW_LOADER
+
 config BT_HCIBTUSB
        tristate "HCI USB driver"
        depends on USB
+       select BT_INTEL
        help
          Bluetooth HCI USB driver.
          This driver is required if you want to use Bluetooth devices with
@@ -13,6 +21,17 @@ config BT_HCIBTUSB
          Say Y here to compile support for Bluetooth USB devices into the
          kernel or say M to compile it as module (btusb).
 
+config BT_HCIBTUSB_BCM
+       bool "Broadcom protocol support"
+       depends on BT_HCIBTUSB
+       select BT_BCM
+       default y
+       help
+         The Broadcom protocol support enables firmware and patchram
+         download support for Broadcom Bluetooth controllers.
+
+         Say Y here to compile support for the Broadcom protocol.
+
 config BT_HCIBTSDIO
        tristate "HCI SDIO driver"
        depends on MMC
@@ -62,6 +81,7 @@ config BT_HCIUART_BCSP
 config BT_HCIUART_ATH3K
        bool "Atheros AR300x serial support"
        depends on BT_HCIUART
+       select BT_HCIUART_H4
        help
          HCIATH3K (HCI Atheros AR300x) is a serial protocol for
          communication between host and Atheros AR300x Bluetooth devices.
@@ -94,6 +114,27 @@ config BT_HCIUART_3WIRE
 
          Say Y here to compile support for Three-wire UART protocol.
 
+config BT_HCIUART_INTEL
+       bool "Intel protocol support"
+       depends on BT_HCIUART
+       select BT_INTEL
+       help
+         The Intel protocol support enables Bluetooth HCI over serial
+         port interface for Intel Bluetooth controllers.
+
+         Say Y here to compile support for the Intel protocol.
+
+config BT_HCIUART_BCM
+       bool "Broadcom protocol support"
+       depends on BT_HCIUART
+       select BT_HCIUART_H4
+       select BT_BCM
+       help
+         The Broadcom protocol support enables Bluetooth HCI over serial
+         port interface for Broadcom Bluetooth controllers.
+
+         Say Y here to compile support for the Broadcom protocol.
+
 config BT_HCIBCM203X
        tristate "HCI BCM203x USB driver"
        depends on USB
index 9fe8a875a8277b51c069ce682d6ece31c922d426..dd0d9c40b99914817f76728d600fb359dbcac007 100644 (file)
@@ -15,10 +15,12 @@ obj-$(CONFIG_BT_HCIBTUART)  += btuart_cs.o
 obj-$(CONFIG_BT_HCIBTUSB)      += btusb.o
 obj-$(CONFIG_BT_HCIBTSDIO)     += btsdio.o
 
+obj-$(CONFIG_BT_INTEL)         += btintel.o
 obj-$(CONFIG_BT_ATH3K)         += ath3k.o
 obj-$(CONFIG_BT_MRVL)          += btmrvl.o
 obj-$(CONFIG_BT_MRVL_SDIO)     += btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)                += btwilink.o
+obj-$(CONFIG_BT_BCM)           += btbcm.o
 
 btmrvl-y                       := btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)      += btmrvl_debugfs.o
@@ -29,6 +31,8 @@ hci_uart-$(CONFIG_BT_HCIUART_BCSP)    += hci_bcsp.o
 hci_uart-$(CONFIG_BT_HCIUART_LL)       += hci_ll.o
 hci_uart-$(CONFIG_BT_HCIUART_ATH3K)    += hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)    += hci_h5.o
+hci_uart-$(CONFIG_BT_HCIUART_INTEL)    += hci_intel.o
+hci_uart-$(CONFIG_BT_HCIUART_BCM)      += hci_bcm.o
 hci_uart-objs                          := $(hci_uart-y)
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
new file mode 100644 (file)
index 0000000..d0741f3
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ *
+ *  Bluetooth support for Broadcom devices
+ *
+ *  Copyright (C) 2015  Intel Corporation
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btbcm.h"
+
+#define VERSION "0.1"
+
+#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
+
+int btbcm_check_bdaddr(struct hci_dev *hdev)
+{
+       struct hci_rp_read_bd_addr *bda;
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               int err = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Reading device address failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*bda)) {
+               BT_ERR("%s: BCM: Device address length mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       bda = (struct hci_rp_read_bd_addr *)skb->data;
+       if (bda->status) {
+               BT_ERR("%s: BCM: Device address result failed (%02x)",
+                      hdev->name, bda->status);
+               kfree_skb(skb);
+               return -bt_to_errno(bda->status);
+       }
+
+       /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
+        * with no configured address.
+        */
+       if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0)) {
+               BT_INFO("%s: BCM: Using default device address (%pMR)",
+                       hdev->name, &bda->bdaddr);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_check_bdaddr);
+
+int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       int err;
+
+       skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Change address command failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
+
+static int btbcm_reset(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               int err = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Reset failed (%d)", hdev->name, err);
+               return err;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+
+static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: BCM: Reading local version info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return skb;
+       }
+
+       if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+               BT_ERR("%s: BCM: Local version length mismatch", hdev->name);
+               kfree_skb(skb);
+               return ERR_PTR(-EIO);
+       }
+
+       return skb;
+}
+
+static struct sk_buff *btbcm_read_verbose_config(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: BCM: Read verbose config info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return skb;
+       }
+
+       if (skb->len != 7) {
+               BT_ERR("%s: BCM: Verbose config length mismatch", hdev->name);
+               kfree_skb(skb);
+               return ERR_PTR(-EIO);
+       }
+
+       return skb;
+}
+
+static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: BCM: Read USB product info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return skb;
+       }
+
+       if (skb->len != 5) {
+               BT_ERR("%s: BCM: USB product length mismatch", hdev->name);
+               kfree_skb(skb);
+               return ERR_PTR(-EIO);
+       }
+
+       return skb;
+}
+
+static const struct {
+       u16 subver;
+       const char *name;
+} bcm_uart_subver_table[] = {
+       { 0x410e, "BCM43341B0"  },      /* 002.001.014 */
+       { }
+};
+
+static const struct {
+       u16 subver;
+       const char *name;
+} bcm_usb_subver_table[] = {
+       { 0x210b, "BCM43142A0"  },      /* 001.001.011 */
+       { 0x2112, "BCM4314A0"   },      /* 001.001.018 */
+       { 0x2118, "BCM20702A0"  },      /* 001.001.024 */
+       { 0x2126, "BCM4335A0"   },      /* 001.001.038 */
+       { 0x220e, "BCM20702A1"  },      /* 001.002.014 */
+       { 0x230f, "BCM4354A2"   },      /* 001.003.015 */
+       { 0x4106, "BCM4335B0"   },      /* 002.001.006 */
+       { 0x410e, "BCM20702B0"  },      /* 002.001.014 */
+       { 0x6109, "BCM4335C0"   },      /* 003.001.009 */
+       { 0x610c, "BCM4354"     },      /* 003.001.012 */
+       { }
+};
+
+int btbcm_setup_patchram(struct hci_dev *hdev)
+{
+       const struct hci_command_hdr *cmd;
+       const struct firmware *fw;
+       const u8 *fw_ptr;
+       size_t fw_size;
+       char fw_name[64];
+       u16 opcode, subver, rev, pid, vid;
+       const char *hw_name = NULL;
+       struct sk_buff *skb;
+       struct hci_rp_read_local_version *ver;
+       int i, err;
+
+       /* Reset */
+       err = btbcm_reset(hdev);
+       if (err)
+               return err;
+
+       /* Read Local Version Info */
+       skb = btbcm_read_local_version(hdev);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       ver = (struct hci_rp_read_local_version *)skb->data;
+       rev = le16_to_cpu(ver->hci_rev);
+       subver = le16_to_cpu(ver->lmp_subver);
+       kfree_skb(skb);
+
+       /* Read Verbose Config Version Info */
+       skb = btbcm_read_verbose_config(hdev);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+       kfree_skb(skb);
+
+       switch ((rev & 0xf000) >> 12) {
+       case 0:
+               for (i = 0; bcm_uart_subver_table[i].name; i++) {
+                       if (subver == bcm_uart_subver_table[i].subver) {
+                               hw_name = bcm_uart_subver_table[i].name;
+                               break;
+                       }
+               }
+
+               snprintf(fw_name, sizeof(fw_name), "brcm/%s.hcd",
+                        hw_name ? : "BCM");
+               break;
+       case 1:
+       case 2:
+               /* Read USB Product Info */
+               skb = btbcm_read_usb_product(hdev);
+               if (IS_ERR(skb))
+                       return PTR_ERR(skb);
+
+               vid = get_unaligned_le16(skb->data + 1);
+               pid = get_unaligned_le16(skb->data + 3);
+               kfree_skb(skb);
+
+               for (i = 0; bcm_usb_subver_table[i].name; i++) {
+                       if (subver == bcm_usb_subver_table[i].subver) {
+                               hw_name = bcm_usb_subver_table[i].name;
+                               break;
+                       }
+               }
+
+               snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd",
+                        hw_name ? : "BCM", vid, pid);
+               break;
+       default:
+               return 0;
+       }
+
+       BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+       err = request_firmware(&fw, fw_name, &hdev->dev);
+       if (err < 0) {
+               BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
+               return 0;
+       }
+
+       /* Start Download */
+       skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
+                      hdev->name, err);
+               goto reset;
+       }
+       kfree_skb(skb);
+
+       /* 50 msec delay after Download Minidrv completes */
+       msleep(50);
+
+       fw_ptr = fw->data;
+       fw_size = fw->size;
+
+       while (fw_size >= sizeof(*cmd)) {
+               const u8 *cmd_param;
+
+               cmd = (struct hci_command_hdr *)fw_ptr;
+               fw_ptr += sizeof(*cmd);
+               fw_size -= sizeof(*cmd);
+
+               if (fw_size < cmd->plen) {
+                       BT_ERR("%s: BCM: patch %s is corrupted", hdev->name,
+                              fw_name);
+                       err = -EINVAL;
+                       goto reset;
+               }
+
+               cmd_param = fw_ptr;
+               fw_ptr += cmd->plen;
+               fw_size -= cmd->plen;
+
+               opcode = le16_to_cpu(cmd->opcode);
+
+               skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
+                                    HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+                       BT_ERR("%s: BCM: patch command %04x failed (%d)",
+                              hdev->name, opcode, err);
+                       goto reset;
+               }
+               kfree_skb(skb);
+       }
+
+       /* 250 msec delay after Launch Ram completes */
+       msleep(250);
+
+reset:
+       /* Reset */
+       err = btbcm_reset(hdev);
+       if (err)
+               goto done;
+
+       /* Read Local Version Info */
+       skb = btbcm_read_local_version(hdev);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               goto done;
+       }
+
+       ver = (struct hci_rp_read_local_version *)skb->data;
+       rev = le16_to_cpu(ver->hci_rev);
+       subver = le16_to_cpu(ver->lmp_subver);
+       kfree_skb(skb);
+
+       BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+       btbcm_check_bdaddr(hdev);
+
+       set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+
+done:
+       release_firmware(fw);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
+
+int btbcm_setup_apple(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       /* Read Verbose Config Version Info */
+       skb = btbcm_read_verbose_config(hdev);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+               get_unaligned_le16(skb->data + 5));
+       kfree_skb(skb);
+
+       set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_setup_apple);
+
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_DESCRIPTION("Bluetooth support for Broadcom devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
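
The BT_INFO banner in btbcm_setup_patchram() above decodes lmp_subver into the major.minor.build triplet listed in the table comments. A standalone worked example of that decoding:

#include <stdio.h>

/* Worked example of the subver decoding used above:
 * 0x410e -> (0x410e & 0x7000) >> 13 = 2, (0x410e & 0x1f00) >> 8 = 1,
 * 0x410e & 0x00ff = 14, i.e. "002.001.014" (BCM20702B0 in the USB table). */
int main(void)
{
	unsigned int subver = 0x410e;

	printf("%3.3u.%3.3u.%3.3u\n",
	       (subver & 0x7000) >> 13, (subver & 0x1f00) >> 8,
	       subver & 0x00ff);
	return 0;
}
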
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
new file mode 100644 (file)
index 0000000..34268ae
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ *
+ *  Bluetooth support for Broadcom devices
+ *
+ *  Copyright (C) 2015  Intel Corporation
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#if IS_ENABLED(CONFIG_BT_BCM)
+
+int btbcm_check_bdaddr(struct hci_dev *hdev);
+int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+
+int btbcm_setup_patchram(struct hci_dev *hdev);
+int btbcm_setup_apple(struct hci_dev *hdev);
+
+#else
+
+static inline int btbcm_check_bdaddr(struct hci_dev *hdev)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int btbcm_setup_patchram(struct hci_dev *hdev)
+{
+       return 0;
+}
+
+static inline int btbcm_setup_apple(struct hci_dev *hdev)
+{
+       return 0;
+}
+
+#endif
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
new file mode 100644 (file)
index 0000000..2d43d42
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ *
+ *  Bluetooth support for Intel devices
+ *
+ *  Copyright (C) 2015  Intel Corporation
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btintel.h"
+
+#define VERSION "0.1"
+
+#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+
+int btintel_check_bdaddr(struct hci_dev *hdev)
+{
+       struct hci_rp_read_bd_addr *bda;
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               int err = PTR_ERR(skb);
+               BT_ERR("%s: Reading Intel device address failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*bda)) {
+               BT_ERR("%s: Intel device address length mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       bda = (struct hci_rp_read_bd_addr *)skb->data;
+       if (bda->status) {
+               BT_ERR("%s: Intel device address result failed (%02x)",
+                      hdev->name, bda->status);
+               kfree_skb(skb);
+               return -bt_to_errno(bda->status);
+       }
+
+       /* For some Intel based controllers, the default Bluetooth device
+        * address 00:03:19:9E:8B:00 can be found. These controllers are
+        * fully operational, but have the danger of duplicate addresses
+        * and that in turn can cause problems with Bluetooth operation.
+        */
+       if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
+               BT_ERR("%s: Found Intel default device address (%pMR)",
+                      hdev->name, &bda->bdaddr);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
+
+int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       int err;
+
+       skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Changing Intel device address failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
+
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
new file mode 100644 (file)
index 0000000..4bda6ab
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ *
+ *  Bluetooth support for Intel devices
+ *
+ *  Copyright (C) 2015  Intel Corporation
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+struct intel_version {
+       u8 status;
+       u8 hw_platform;
+       u8 hw_variant;
+       u8 hw_revision;
+       u8 fw_variant;
+       u8 fw_revision;
+       u8 fw_build_num;
+       u8 fw_build_ww;
+       u8 fw_build_yy;
+       u8 fw_patch_num;
+} __packed;
+
+struct intel_boot_params {
+       __u8     status;
+       __u8     otp_format;
+       __u8     otp_content;
+       __u8     otp_patch;
+       __le16   dev_revid;
+       __u8     secure_boot;
+       __u8     key_from_hdr;
+       __u8     key_type;
+       __u8     otp_lock;
+       __u8     api_lock;
+       __u8     debug_lock;
+       bdaddr_t otp_bdaddr;
+       __u8     min_fw_build_nn;
+       __u8     min_fw_build_cw;
+       __u8     min_fw_build_yy;
+       __u8     limited_cce;
+       __u8     unlocked_state;
+} __packed;
+
+struct intel_bootup {
+       __u8     zero;
+       __u8     num_cmds;
+       __u8     source;
+       __u8     reset_type;
+       __u8     reset_reason;
+       __u8     ddc_status;
+} __packed;
+
+struct intel_secure_send_result {
+       __u8     result;
+       __le16   opcode;
+       __u8     status;
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_INTEL)
+
+int btintel_check_bdaddr(struct hci_dev *hdev);
+int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+
+#else
+
+static inline int btintel_check_bdaddr(struct hci_dev *hdev)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
index 9bf4d6ae6c6be17f28396dc2eae3d311a105b34b..de7b236eeae7777f71389ec42c233525d309aef2 100644 (file)
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/firmware.h>
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "0.7"
+#include "btintel.h"
+#include "btbcm.h"
+
+#define VERSION "0.8"
 
 static bool disable_scofix;
 static bool force_scofix;
@@ -111,13 +113,7 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x0c10, 0x0000) },
 
        /* Broadcom BCM20702A0 */
-       { USB_DEVICE(0x0489, 0xe042) },
-       { USB_DEVICE(0x04ca, 0x2003) },
-       { USB_DEVICE(0x0b05, 0x17b5) },
-       { USB_DEVICE(0x0b05, 0x17cb) },
        { USB_DEVICE(0x413c, 0x8197) },
-       { USB_DEVICE(0x13d3, 0x3404),
-         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Broadcom BCM20702B0 (Dynex/Insignia) */
        { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
@@ -139,10 +135,12 @@ static const struct usb_device_id btusb_table[] = {
          .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Belkin F8065bf - Broadcom based */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* IMC Networks - Broadcom based */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Intel Bluetooth USB Bootloader (RAM module) */
        { USB_DEVICE(0x8087, 0x0a5a),
@@ -1347,39 +1345,6 @@ static int btusb_setup_csr(struct hci_dev *hdev)
        return ret;
 }
 
-struct intel_version {
-       u8 status;
-       u8 hw_platform;
-       u8 hw_variant;
-       u8 hw_revision;
-       u8 fw_variant;
-       u8 fw_revision;
-       u8 fw_build_num;
-       u8 fw_build_ww;
-       u8 fw_build_yy;
-       u8 fw_patch_num;
-} __packed;
-
-struct intel_boot_params {
-       __u8     status;
-       __u8     otp_format;
-       __u8     otp_content;
-       __u8     otp_patch;
-       __le16   dev_revid;
-       __u8     secure_boot;
-       __u8     key_from_hdr;
-       __u8     key_type;
-       __u8     otp_lock;
-       __u8     api_lock;
-       __u8     debug_lock;
-       bdaddr_t otp_bdaddr;
-       __u8     min_fw_build_nn;
-       __u8     min_fw_build_cw;
-       __u8     min_fw_build_yy;
-       __u8     limited_cce;
-       __u8     unlocked_state;
-} __packed;
-
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
                                                       struct intel_version *ver)
 {
@@ -1536,51 +1501,6 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
        return 0;
 }
 
-#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
-
-static int btusb_check_bdaddr_intel(struct hci_dev *hdev)
-{
-       struct sk_buff *skb;
-       struct hci_rp_read_bd_addr *rp;
-
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
-                            HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s reading Intel device address failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return PTR_ERR(skb);
-       }
-
-       if (skb->len != sizeof(*rp)) {
-               BT_ERR("%s Intel device address length mismatch", hdev->name);
-               kfree_skb(skb);
-               return -EIO;
-       }
-
-       rp = (struct hci_rp_read_bd_addr *)skb->data;
-       if (rp->status) {
-               BT_ERR("%s Intel device address result failed (%02x)",
-                      hdev->name, rp->status);
-               kfree_skb(skb);
-               return -bt_to_errno(rp->status);
-       }
-
-       /* For some Intel based controllers, the default Bluetooth device
-        * address 00:03:19:9E:8B:00 can be found. These controllers are
-        * fully operational, but have the danger of duplicate addresses
-        * and that in turn can cause problems with Bluetooth operation.
-        */
-       if (!bacmp(&rp->bdaddr, BDADDR_INTEL)) {
-               BT_ERR("%s found Intel default device address (%pMR)",
-                      hdev->name, &rp->bdaddr);
-               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
-       }
-
-       kfree_skb(skb);
-
-       return 0;
-}
-
 static int btusb_setup_intel(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -1653,7 +1573,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
                BT_INFO("%s: Intel device is already patched. patch num: %02x",
                        hdev->name, ver->fw_patch_num);
                kfree_skb(skb);
-               btusb_check_bdaddr_intel(hdev);
+               btintel_check_bdaddr(hdev);
                return 0;
        }
 
@@ -1666,7 +1586,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        fw = btusb_setup_intel_get_fw(hdev, ver);
        if (!fw) {
                kfree_skb(skb);
-               btusb_check_bdaddr_intel(hdev);
+               btintel_check_bdaddr(hdev);
                return 0;
        }
        fw_ptr = fw->data;
@@ -1747,7 +1667,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
                hdev->name);
 
-       btusb_check_bdaddr_intel(hdev);
+       btintel_check_bdaddr(hdev);
        return 0;
 
 exit_mfg_disable:
@@ -1763,7 +1683,7 @@ exit_mfg_disable:
 
        BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
 
-       btusb_check_bdaddr_intel(hdev);
+       btintel_check_bdaddr(hdev);
        return 0;
 
 exit_mfg_deactivate:
@@ -1784,7 +1704,7 @@ exit_mfg_deactivate:
        BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
                hdev->name);
 
-       btusb_check_bdaddr_intel(hdev);
+       btintel_check_bdaddr(hdev);
        return 0;
 }
 
@@ -1826,6 +1746,38 @@ static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
        return btusb_recv_bulk(data, buffer, count);
 }
 
+static void btusb_intel_bootup(struct btusb_data *data, const void *ptr,
+                              unsigned int len)
+{
+       const struct intel_bootup *evt = ptr;
+
+       if (len != sizeof(*evt))
+               return;
+
+       if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
+               smp_mb__after_atomic();
+               wake_up_bit(&data->flags, BTUSB_BOOTING);
+       }
+}
+
+static void btusb_intel_secure_send_result(struct btusb_data *data,
+                                          const void *ptr, unsigned int len)
+{
+       const struct intel_secure_send_result *evt = ptr;
+
+       if (len != sizeof(*evt))
+               return;
+
+       if (evt->result)
+               set_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
+
+       if (test_and_clear_bit(BTUSB_DOWNLOADING, &data->flags) &&
+           test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
+               smp_mb__after_atomic();
+               wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
+       }
+}
+
 static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct btusb_data *data = hci_get_drvdata(hdev);
@@ -1833,32 +1785,27 @@ static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
        if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
                struct hci_event_hdr *hdr = (void *)skb->data;
 
-               /* When the firmware loading completes the device sends
-                * out a vendor specific event indicating the result of
-                * the firmware loading.
-                */
-               if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
-                   skb->data[2] == 0x06) {
-                       if (skb->data[3] != 0x00)
-                               test_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
-
-                       if (test_and_clear_bit(BTUSB_DOWNLOADING,
-                                              &data->flags) &&
-                           test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
-                               smp_mb__after_atomic();
-                               wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
-                       }
-               }
-
-               /* When switching to the operational firmware the device
-                * sends a vendor specific event indicating that the bootup
-                * completed.
-                */
-               if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
-                   skb->data[2] == 0x02) {
-                       if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
-                               smp_mb__after_atomic();
-                               wake_up_bit(&data->flags, BTUSB_BOOTING);
+               if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+                   hdr->plen > 0) {
+                       const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+                       unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+
+                       switch (skb->data[2]) {
+                       case 0x02:
+                               /* When switching to the operational firmware
+                                * the device sends a vendor specific event
+                                * indicating that the bootup completed.
+                                */
+                               btusb_intel_bootup(data, ptr, len);
+                               break;
+                       case 0x06:
+                               /* When the firmware loading completes the
+                                * device sends out a vendor specific event
+                                * indicating the result of the firmware
+                                * loading.
+                                */
+                               btusb_intel_secure_send_result(data, ptr, len);
+                               break;
                        }
                }
        }
@@ -2060,7 +2007,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        if (ver->fw_variant == 0x23) {
                kfree_skb(skb);
                clear_bit(BTUSB_BOOTLOADER, &data->flags);
-               btusb_check_bdaddr_intel(hdev);
+               btintel_check_bdaddr(hdev);
                return 0;
        }
 
@@ -2344,23 +2291,6 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
        kfree_skb(skb);
 }
 
-static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
-{
-       struct sk_buff *skb;
-       long ret;
-
-       skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: changing Intel device address failed (%ld)",
-                      hdev->name, ret);
-               return ret;
-       }
-       kfree_skb(skb);
-
-       return 0;
-}
-
 static int btusb_shutdown_intel(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -2405,266 +2335,6 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
        return 0;
 }
 
-static const struct {
-       u16 subver;
-       const char *name;
-} bcm_subver_table[] = {
-       { 0x210b, "BCM43142A0"  },      /* 001.001.011 */
-       { 0x2112, "BCM4314A0"   },      /* 001.001.018 */
-       { 0x2118, "BCM20702A0"  },      /* 001.001.024 */
-       { 0x2126, "BCM4335A0"   },      /* 001.001.038 */
-       { 0x220e, "BCM20702A1"  },      /* 001.002.014 */
-       { 0x230f, "BCM4354A2"   },      /* 001.003.015 */
-       { 0x4106, "BCM4335B0"   },      /* 002.001.006 */
-       { 0x410e, "BCM20702B0"  },      /* 002.001.014 */
-       { 0x6109, "BCM4335C0"   },      /* 003.001.009 */
-       { 0x610c, "BCM4354"     },      /* 003.001.012 */
-       { }
-};
-
-#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
-
-static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
-{
-       struct btusb_data *data = hci_get_drvdata(hdev);
-       struct usb_device *udev = data->udev;
-       char fw_name[64];
-       const struct firmware *fw;
-       const u8 *fw_ptr;
-       size_t fw_size;
-       const struct hci_command_hdr *cmd;
-       const u8 *cmd_param;
-       u16 opcode, subver, rev;
-       const char *hw_name = NULL;
-       struct sk_buff *skb;
-       struct hci_rp_read_local_version *ver;
-       struct hci_rp_read_bd_addr *bda;
-       long ret;
-       int i;
-
-       /* Reset */
-       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
-               return ret;
-       }
-       kfree_skb(skb);
-
-       /* Read Local Version Info */
-       skb = btusb_read_local_version(hdev);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       ver = (struct hci_rp_read_local_version *)skb->data;
-       rev = le16_to_cpu(ver->hci_rev);
-       subver = le16_to_cpu(ver->lmp_subver);
-       kfree_skb(skb);
-
-       /* Read Verbose Config Version Info */
-       skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: BCM: Read Verbose Version failed (%ld)",
-                      hdev->name, ret);
-               return ret;
-       }
-
-       if (skb->len != 7) {
-               BT_ERR("%s: BCM: Read Verbose Version event length mismatch",
-                      hdev->name);
-               kfree_skb(skb);
-               return -EIO;
-       }
-
-       BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
-       kfree_skb(skb);
-
-       for (i = 0; bcm_subver_table[i].name; i++) {
-               if (subver == bcm_subver_table[i].subver) {
-                       hw_name = bcm_subver_table[i].name;
-                       break;
-               }
-       }
-
-       BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-               hw_name ? : "BCM", (subver & 0x7000) >> 13,
-               (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
-
-       snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd",
-                hw_name ? : "BCM",
-                le16_to_cpu(udev->descriptor.idVendor),
-                le16_to_cpu(udev->descriptor.idProduct));
-
-       ret = request_firmware(&fw, fw_name, &hdev->dev);
-       if (ret < 0) {
-               BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
-               return 0;
-       }
-
-       /* Start Download */
-       skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: BCM: Download Minidrv command failed (%ld)",
-                      hdev->name, ret);
-               goto reset_fw;
-       }
-       kfree_skb(skb);
-
-       /* 50 msec delay after Download Minidrv completes */
-       msleep(50);
-
-       fw_ptr = fw->data;
-       fw_size = fw->size;
-
-       while (fw_size >= sizeof(*cmd)) {
-               cmd = (struct hci_command_hdr *)fw_ptr;
-               fw_ptr += sizeof(*cmd);
-               fw_size -= sizeof(*cmd);
-
-               if (fw_size < cmd->plen) {
-                       BT_ERR("%s: BCM: patch %s is corrupted",
-                              hdev->name, fw_name);
-                       ret = -EINVAL;
-                       goto reset_fw;
-               }
-
-               cmd_param = fw_ptr;
-               fw_ptr += cmd->plen;
-               fw_size -= cmd->plen;
-
-               opcode = le16_to_cpu(cmd->opcode);
-
-               skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
-                                    HCI_INIT_TIMEOUT);
-               if (IS_ERR(skb)) {
-                       ret = PTR_ERR(skb);
-                       BT_ERR("%s: BCM: patch command %04x failed (%ld)",
-                              hdev->name, opcode, ret);
-                       goto reset_fw;
-               }
-               kfree_skb(skb);
-       }
-
-       /* 250 msec delay after Launch Ram completes */
-       msleep(250);
-
-reset_fw:
-       /* Reset */
-       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
-               goto done;
-       }
-       kfree_skb(skb);
-
-       /* Read Local Version Info */
-       skb = btusb_read_local_version(hdev);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               goto done;
-       }
-
-       ver = (struct hci_rp_read_local_version *)skb->data;
-       rev = le16_to_cpu(ver->hci_rev);
-       subver = le16_to_cpu(ver->lmp_subver);
-       kfree_skb(skb);
-
-       BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-               hw_name ? : "BCM", (subver & 0x7000) >> 13,
-               (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
-
-       /* Read BD Address */
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
-                            HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: HCI_OP_READ_BD_ADDR failed (%ld)",
-                      hdev->name, ret);
-               goto done;
-       }
-
-       if (skb->len != sizeof(*bda)) {
-               BT_ERR("%s: HCI_OP_READ_BD_ADDR event length mismatch",
-                      hdev->name);
-               kfree_skb(skb);
-               ret = -EIO;
-               goto done;
-       }
-
-       bda = (struct hci_rp_read_bd_addr *)skb->data;
-       if (bda->status) {
-               BT_ERR("%s: HCI_OP_READ_BD_ADDR error status (%02x)",
-                      hdev->name, bda->status);
-               kfree_skb(skb);
-               ret = -bt_to_errno(bda->status);
-               goto done;
-       }
-
-       /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
-        * with no configured address.
-        */
-       if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0)) {
-               BT_INFO("%s: BCM: using default device address (%pMR)",
-                       hdev->name, &bda->bdaddr);
-               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
-       }
-
-       kfree_skb(skb);
-
-done:
-       release_firmware(fw);
-
-       return ret;
-}
-
-static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
-{
-       struct sk_buff *skb;
-       long ret;
-
-       skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               ret = PTR_ERR(skb);
-               BT_ERR("%s: BCM: Change address command failed (%ld)",
-                      hdev->name, ret);
-               return ret;
-       }
-       kfree_skb(skb);
-
-       return 0;
-}
-
-static int btusb_setup_bcm_apple(struct hci_dev *hdev)
-{
-       struct sk_buff *skb;
-       int err;
-
-       /* Read Verbose Config Version Info */
-       skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
-               BT_ERR("%s: BCM: Read Verbose Version failed (%d)",
-                      hdev->name, err);
-               return err;
-       }
-
-       if (skb->len != 7) {
-               BT_ERR("%s: BCM: Read Verbose Version event length mismatch",
-                      hdev->name);
-               kfree_skb(skb);
-               return -EIO;
-       }
-
-       BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-               get_unaligned_le16(skb->data + 5));
-       kfree_skb(skb);
-
-       return 0;
-}
-
 static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
                                    const bdaddr_t *bdaddr)
 {
@@ -3058,21 +2728,20 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_BCM92035)
                hdev->setup = btusb_setup_bcm92035;
 
+#ifdef CONFIG_BT_HCIBTUSB_BCM
        if (id->driver_info & BTUSB_BCM_PATCHRAM) {
-               hdev->setup = btusb_setup_bcm_patchram;
-               hdev->set_bdaddr = btusb_set_bdaddr_bcm;
-               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+               hdev->setup = btbcm_setup_patchram;
+               hdev->set_bdaddr = btbcm_set_bdaddr;
        }
 
-       if (id->driver_info & BTUSB_BCM_APPLE) {
-               hdev->setup = btusb_setup_bcm_apple;
-               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
-       }
+       if (id->driver_info & BTUSB_BCM_APPLE)
+               hdev->setup = btbcm_setup_apple;
+#endif
 
        if (id->driver_info & BTUSB_INTEL) {
                hdev->setup = btusb_setup_intel;
                hdev->shutdown = btusb_shutdown_intel;
-               hdev->set_bdaddr = btusb_set_bdaddr_intel;
+               hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
                set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
        }
@@ -3081,7 +2750,7 @@ static int btusb_probe(struct usb_interface *intf,
                hdev->send = btusb_send_frame_intel;
                hdev->setup = btusb_setup_intel_new;
                hdev->hw_error = btusb_hw_error_intel;
-               hdev->set_bdaddr = btusb_set_bdaddr_intel;
+               hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
 
index 9c4dcf4c62ea763e318ecc41f7ef5db03a9fbe67..1b3f8647ea2fd446e1a6f14979a27e399fb0dbf5 100644 (file)
@@ -45,6 +45,7 @@ struct ath_struct {
        struct hci_uart *hu;
        unsigned int cur_sleep;
 
+       struct sk_buff *rx_skb;
        struct sk_buff_head txq;
        struct work_struct ctxtsw;
 };
@@ -136,6 +137,8 @@ static int ath_close(struct hci_uart *hu)
 
        skb_queue_purge(&ath->txq);
 
+       kfree_skb(ath->rx_skb);
+
        cancel_work_sync(&ath->ctxtsw);
 
        hu->priv = NULL;
@@ -187,40 +190,42 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
        return skb_dequeue(&ath->txq);
 }
 
+static const struct h4_recv_pkt ath_recv_pkts[] = {
+       { H4_RECV_ACL,   .recv = hci_recv_frame },
+       { H4_RECV_SCO,   .recv = hci_recv_frame },
+       { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
 /* Recv data */
-static int ath_recv(struct hci_uart *hu, void *data, int count)
+static int ath_recv(struct hci_uart *hu, const void *data, int count)
 {
-       int ret;
+       struct ath_struct *ath = hu->priv;
 
-       ret = hci_recv_stream_fragment(hu->hdev, data, count);
-       if (ret < 0) {
-               BT_ERR("Frame Reassembly Failed");
-               return ret;
+       ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+                                 ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
+       if (IS_ERR(ath->rx_skb)) {
+               int err = PTR_ERR(ath->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               return err;
        }
 
        return count;
 }
 
-static struct hci_uart_proto athp = {
-       .id = HCI_UART_ATH3K,
-       .open = ath_open,
-       .close = ath_close,
-       .recv = ath_recv,
-       .enqueue = ath_enqueue,
-       .dequeue = ath_dequeue,
-       .flush = ath_flush,
+static const struct hci_uart_proto athp = {
+       .id             = HCI_UART_ATH3K,
+       .name           = "ATH3K",
+       .open           = ath_open,
+       .close          = ath_close,
+       .recv           = ath_recv,
+       .enqueue        = ath_enqueue,
+       .dequeue        = ath_dequeue,
+       .flush          = ath_flush,
 };
 
 int __init ath_init(void)
 {
-       int err = hci_uart_register_proto(&athp);
-
-       if (!err)
-               BT_INFO("HCIATH3K protocol initialized");
-       else
-               BT_ERR("HCIATH3K protocol registration failed");
-
-       return err;
+       return hci_uart_register_proto(&athp);
 }
 
 int __exit ath_deinit(void)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
new file mode 100644 (file)
index 0000000..1ec0b4a
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ *
+ *  Bluetooth HCI UART driver for Broadcom devices
+ *
+ *  Copyright (C) 2015  Intel Corporation
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btbcm.h"
+#include "hci_uart.h"
+
+struct bcm_data {
+       struct sk_buff *rx_skb;
+       struct sk_buff_head txq;
+};
+
+static int bcm_open(struct hci_uart *hu)
+{
+       struct bcm_data *bcm;
+
+       BT_DBG("hu %p", hu);
+
+       bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
+       if (!bcm)
+               return -ENOMEM;
+
+       skb_queue_head_init(&bcm->txq);
+
+       hu->priv = bcm;
+       return 0;
+}
+
+static int bcm_close(struct hci_uart *hu)
+{
+       struct bcm_data *bcm = hu->priv;
+
+       BT_DBG("hu %p", hu);
+
+       skb_queue_purge(&bcm->txq);
+       kfree_skb(bcm->rx_skb);
+       kfree(bcm);
+
+       hu->priv = NULL;
+       return 0;
+}
+
+static int bcm_flush(struct hci_uart *hu)
+{
+       struct bcm_data *bcm = hu->priv;
+
+       BT_DBG("hu %p", hu);
+
+       skb_queue_purge(&bcm->txq);
+
+       return 0;
+}
+
+static int bcm_setup(struct hci_uart *hu)
+{
+       BT_DBG("hu %p", hu);
+
+       hu->hdev->set_bdaddr = btbcm_set_bdaddr;
+
+       return btbcm_setup_patchram(hu->hdev);
+}
+
+static const struct h4_recv_pkt bcm_recv_pkts[] = {
+       { H4_RECV_ACL,   .recv = hci_recv_frame },
+       { H4_RECV_SCO,   .recv = hci_recv_frame },
+       { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int bcm_recv(struct hci_uart *hu, const void *data, int count)
+{
+       struct bcm_data *bcm = hu->priv;
+
+       if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+               return -EUNATCH;
+
+       bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+                                 bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
+       if (IS_ERR(bcm->rx_skb)) {
+               int err = PTR_ERR(bcm->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               return err;
+       }
+
+       return count;
+}
+
+static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+       struct bcm_data *bcm = hu->priv;
+
+       BT_DBG("hu %p skb %p", hu, skb);
+
+       /* Prepend skb with frame type */
+       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+       skb_queue_tail(&bcm->txq, skb);
+
+       return 0;
+}
+
+static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
+{
+       struct bcm_data *bcm = hu->priv;
+
+       return skb_dequeue(&bcm->txq);
+}
+
+static const struct hci_uart_proto bcm_proto = {
+       .id             = HCI_UART_BCM,
+       .name           = "BCM",
+       .open           = bcm_open,
+       .close          = bcm_close,
+       .flush          = bcm_flush,
+       .setup          = bcm_setup,
+       .recv           = bcm_recv,
+       .enqueue        = bcm_enqueue,
+       .dequeue        = bcm_dequeue,
+};
+
+int __init bcm_init(void)
+{
+       return hci_uart_register_proto(&bcm_proto);
+}
+
+int __exit bcm_deinit(void)
+{
+       return hci_uart_unregister_proto(&bcm_proto);
+}
index 21cc45b34f134fe8028606e2595d381a125e0926..dc8e3d4356a0ff40f1b27ddc63de3e7eefef7a87 100644 (file)
@@ -47,8 +47,6 @@
 
 #include "hci_uart.h"
 
-#define VERSION "0.3"
-
 static bool txcrc = 1;
 static bool hciextn = 1;
 
@@ -554,10 +552,10 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
 }
 
 /* Recv data */
-static int bcsp_recv(struct hci_uart *hu, void *data, int count)
+static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
 {
        struct bcsp_struct *bcsp = hu->priv;
-       unsigned char *ptr;
+       const unsigned char *ptr;
 
        BT_DBG("hu %p count %d rx_state %d rx_count %ld", 
                hu, count, bcsp->rx_state, bcsp->rx_count);
@@ -735,8 +733,9 @@ static int bcsp_close(struct hci_uart *hu)
        return 0;
 }
 
-static struct hci_uart_proto bcsp = {
+static const struct hci_uart_proto bcsp = {
        .id             = HCI_UART_BCSP,
+       .name           = "BCSP",
        .open           = bcsp_open,
        .close          = bcsp_close,
        .enqueue        = bcsp_enqueue,
@@ -747,14 +746,7 @@ static struct hci_uart_proto bcsp = {
 
 int __init bcsp_init(void)
 {
-       int err = hci_uart_register_proto(&bcsp);
-
-       if (!err)
-               BT_INFO("HCI BCSP protocol initialized");
-       else
-               BT_ERR("HCI BCSP protocol registration failed");
-
-       return err;
+       return hci_uart_register_proto(&bcsp);
 }
 
 int __exit bcsp_deinit(void)
index 66db9a803373efb92c8966c9f69fdb6b8ec7aa59..f7190f01e1357b764504b0084128804ca87d6a6e 100644 (file)
 #include <linux/signal.h>
 #include <linux/ioctl.h>
 #include <linux/skbuff.h>
+#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
 
-#define VERSION "1.2"
-
 struct h4_struct {
-       unsigned long rx_state;
-       unsigned long rx_count;
        struct sk_buff *rx_skb;
        struct sk_buff_head txq;
 };
@@ -117,18 +114,26 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
        return 0;
 }
 
+static const struct h4_recv_pkt h4_recv_pkts[] = {
+       { H4_RECV_ACL,   .recv = hci_recv_frame },
+       { H4_RECV_SCO,   .recv = hci_recv_frame },
+       { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
 /* Recv data */
-static int h4_recv(struct hci_uart *hu, void *data, int count)
+static int h4_recv(struct hci_uart *hu, const void *data, int count)
 {
-       int ret;
+       struct h4_struct *h4 = hu->priv;
 
        if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
                return -EUNATCH;
 
-       ret = hci_recv_stream_fragment(hu->hdev, data, count);
-       if (ret < 0) {
-               BT_ERR("Frame Reassembly Failed");
-               return ret;
+       h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+                                h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
+       if (IS_ERR(h4->rx_skb)) {
+               int err = PTR_ERR(h4->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               return err;
        }
 
        return count;
@@ -140,8 +145,9 @@ static struct sk_buff *h4_dequeue(struct hci_uart *hu)
        return skb_dequeue(&h4->txq);
 }
 
-static struct hci_uart_proto h4p = {
+static const struct hci_uart_proto h4p = {
        .id             = HCI_UART_H4,
+       .name           = "H4",
        .open           = h4_open,
        .close          = h4_close,
        .recv           = h4_recv,
@@ -152,17 +158,105 @@ static struct hci_uart_proto h4p = {
 
 int __init h4_init(void)
 {
-       int err = hci_uart_register_proto(&h4p);
-
-       if (!err)
-               BT_INFO("HCI H4 protocol initialized");
-       else
-               BT_ERR("HCI H4 protocol registration failed");
-
-       return err;
+       return hci_uart_register_proto(&h4p);
 }
 
 int __exit h4_deinit(void)
 {
        return hci_uart_unregister_proto(&h4p);
 }
+
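+/* Reassemble HCI frames from a raw H:4 byte stream.  The skb under
+ * reassembly is carried between calls by the caller; complete frames are
+ * handed to the matching pkts[].recv callback.  Returns the skb still
+ * being reassembled (or NULL if none), or an ERR_PTR on an unknown packet
+ * type, an oversized payload or an allocation failure.
+ */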
+struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+                           const unsigned char *buffer, int count,
+                           const struct h4_recv_pkt *pkts, int pkts_count)
+{
+       while (count) {
+               int i, len;
+
+               if (!skb) {
+                       for (i = 0; i < pkts_count; i++) {
+                               if (buffer[0] != (&pkts[i])->type)
+                                       continue;
+
+                               skb = bt_skb_alloc((&pkts[i])->maxlen,
+                                                  GFP_ATOMIC);
+                               if (!skb)
+                                       return ERR_PTR(-ENOMEM);
+
+                               bt_cb(skb)->pkt_type = (&pkts[i])->type;
+                               bt_cb(skb)->expect = (&pkts[i])->hlen;
+                               break;
+                       }
+
+                       /* Check for invalid packet type */
+                       if (!skb)
+                               return ERR_PTR(-EILSEQ);
+
+                       count -= 1;
+                       buffer += 1;
+               }
+
+               len = min_t(uint, bt_cb(skb)->expect - skb->len, count);
+               memcpy(skb_put(skb, len), buffer, len);
+
+               count -= len;
+               buffer += len;
+
+               /* Check for partial packet */
+               if (skb->len < bt_cb(skb)->expect)
+                       continue;
+
+               for (i = 0; i < pkts_count; i++) {
+                       if (bt_cb(skb)->pkt_type == (&pkts[i])->type)
+                               break;
+               }
+
+               if (i >= pkts_count) {
+                       kfree_skb(skb);
+                       return ERR_PTR(-EILSEQ);
+               }
+
+               if (skb->len == (&pkts[i])->hlen) {
+                       u16 dlen;
+
+                       switch ((&pkts[i])->lsize) {
+                       case 0:
+                               /* No variable data length */
+                               (&pkts[i])->recv(hdev, skb);
+                               skb = NULL;
+                               break;
+                       case 1:
+                               /* Single octet variable length */
+                               dlen = skb->data[(&pkts[i])->loff];
+                               bt_cb(skb)->expect += dlen;
+
+                               if (skb_tailroom(skb) < dlen) {
+                                       kfree_skb(skb);
+                                       return ERR_PTR(-EMSGSIZE);
+                               }
+                               break;
+                       case 2:
+                               /* Double octet variable length */
+                               dlen = get_unaligned_le16(skb->data +
+                                                         (&pkts[i])->loff);
+                               bt_cb(skb)->expect += dlen;
+
+                               if (skb_tailroom(skb) < dlen) {
+                                       kfree_skb(skb);
+                                       return ERR_PTR(-EMSGSIZE);
+                               }
+                               break;
+                       default:
+                               /* Unsupported variable length */
+                               kfree_skb(skb);
+                               return ERR_PTR(-EILSEQ);
+                       }
+               } else {
+                       /* Complete frame */
+                       (&pkts[i])->recv(hdev, skb);
+                       skb = NULL;
+               }
+       }
+
+       return skb;
+}
index ec0fa7732c0d953c359c86f16a5f307a00f3d930..3455cecc9ecfe630c3331d67d22715ddb60cfcf8 100644 (file)
@@ -511,10 +511,10 @@ static void h5_reset_rx(struct h5 *h5)
        clear_bit(H5_RX_ESC, &h5->flags);
 }
 
-static int h5_recv(struct hci_uart *hu, void *data, int count)
+static int h5_recv(struct hci_uart *hu, const void *data, int count)
 {
        struct h5 *h5 = hu->priv;
-       unsigned char *ptr = data;
+       const unsigned char *ptr = data;
 
        BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
               count);
@@ -743,8 +743,9 @@ static int h5_flush(struct hci_uart *hu)
        return 0;
 }
 
-static struct hci_uart_proto h5p = {
+static const struct hci_uart_proto h5p = {
        .id             = HCI_UART_3WIRE,
+       .name           = "Three-wire (H5)",
        .open           = h5_open,
        .close          = h5_close,
        .recv           = h5_recv,
@@ -755,14 +756,7 @@ static struct hci_uart_proto h5p = {
 
 int __init h5_init(void)
 {
-       int err = hci_uart_register_proto(&h5p);
-
-       if (!err)
-               BT_INFO("HCI Three-wire UART (H5) protocol initialized");
-       else
-               BT_ERR("HCI Three-wire UART (H5) protocol init failed");
-
-       return err;
+       return hci_uart_register_proto(&h5p);
 }
 
 int __exit h5_deinit(void)
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
new file mode 100644 (file)
index 0000000..5dd07bf
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ *
+ *  Bluetooth HCI UART driver for Intel devices
+ *
+ *  Copyright (C) 2015  Intel Corporation
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
index 48a0c250d5b83d7fe353daee1b99cf06afdab454..5c9a73f026649c99a1b56d2f91d5981e29b905c9 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+#include "btintel.h"
+#include "btbcm.h"
 #include "hci_uart.h"
 
-#define VERSION "2.2"
+#define VERSION "2.3"
 
-static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
+static const struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
 
-int hci_uart_register_proto(struct hci_uart_proto *p)
+int hci_uart_register_proto(const struct hci_uart_proto *p)
 {
        if (p->id >= HCI_UART_MAX_PROTO)
                return -EINVAL;
@@ -60,10 +62,12 @@ int hci_uart_register_proto(struct hci_uart_proto *p)
 
        hup[p->id] = p;
 
+       BT_INFO("HCI UART protocol %s registered", p->name);
+
        return 0;
 }
 
-int hci_uart_unregister_proto(struct hci_uart_proto *p)
+int hci_uart_unregister_proto(const struct hci_uart_proto *p)
 {
        if (p->id >= HCI_UART_MAX_PROTO)
                return -EINVAL;
@@ -76,7 +80,7 @@ int hci_uart_unregister_proto(struct hci_uart_proto *p)
        return 0;
 }
 
-static struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
+static const struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
 {
        if (id >= HCI_UART_MAX_PROTO)
                return NULL;
@@ -264,10 +268,48 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 static int hci_uart_setup(struct hci_dev *hdev)
 {
        struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct hci_rp_read_local_version *ver;
+       struct sk_buff *skb;
 
        if (hu->proto->setup)
                return hu->proto->setup(hu);
 
+       if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags))
+               return 0;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading local version information failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return 0;
+       }
+
+       if (skb->len != sizeof(*ver)) {
+               BT_ERR("%s: Event length mismatch for version information",
+                      hdev->name);
+               goto done;
+       }
+
+       ver = (struct hci_rp_read_local_version *)skb->data;
+
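+       /* The manufacturer field holds the Bluetooth SIG company
+        * identifier: 2 is Intel Corp., 15 is Broadcom.
+        */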
+       switch (le16_to_cpu(ver->manufacturer)) {
+#ifdef CONFIG_BT_HCIUART_INTEL
+       case 2:
+               hdev->set_bdaddr = btintel_set_bdaddr;
+               btintel_check_bdaddr(hdev);
+               break;
+#endif
+#ifdef CONFIG_BT_HCIUART_BCM
+       case 15:
+               hdev->set_bdaddr = btbcm_set_bdaddr;
+               btbcm_check_bdaddr(hdev);
+               break;
+#endif
+       }
+
+done:
+       kfree_skb(skb);
        return 0;
 }
 
@@ -326,7 +368,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
  */
 static void hci_uart_tty_close(struct tty_struct *tty)
 {
-       struct hci_uart *hu = (void *)tty->disc_data;
+       struct hci_uart *hu = tty->disc_data;
        struct hci_dev *hdev;
 
        BT_DBG("tty %p", tty);
@@ -365,7 +407,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
  */
 static void hci_uart_tty_wakeup(struct tty_struct *tty)
 {
-       struct hci_uart *hu = (void *)tty->disc_data;
+       struct hci_uart *hu = tty->disc_data;
 
        BT_DBG("");
 
@@ -393,9 +435,10 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
  *
  * Return Value:    None
  */
-static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
+                                char *flags, int count)
 {
-       struct hci_uart *hu = (void *)tty->disc_data;
+       struct hci_uart *hu = tty->disc_data;
 
        if (!hu || tty != hu->tty)
                return;
@@ -404,7 +447,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
                return;
 
        spin_lock(&hu->rx_lock);
-       hu->proto->recv(hu, (void *) data, count);
+       hu->proto->recv(hu, data, count);
 
        if (hu->hdev)
                hu->hdev->stat.byte_rx += count;
@@ -469,7 +512,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
 
 static int hci_uart_set_proto(struct hci_uart *hu, int id)
 {
-       struct hci_uart_proto *p;
+       const struct hci_uart_proto *p;
        int err;
 
        p = hci_uart_get_proto(id);
@@ -497,9 +540,10 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
                                    BIT(HCI_UART_RESET_ON_INIT) |
                                    BIT(HCI_UART_CREATE_AMP) |
                                    BIT(HCI_UART_INIT_PENDING) |
-                                   BIT(HCI_UART_EXT_CONFIG);
+                                   BIT(HCI_UART_EXT_CONFIG) |
+                                   BIT(HCI_UART_VND_DETECT);
 
-       if ((flags & ~valid_flags))
+       if (flags & ~valid_flags)
                return -EINVAL;
 
        hu->hdev_flags = flags;
@@ -520,10 +564,10 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
  *
  * Return Value:    Command dependent
  */
-static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
-                                       unsigned int cmd, unsigned long arg)
+static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+                             unsigned int cmd, unsigned long arg)
 {
-       struct hci_uart *hu = (void *)tty->disc_data;
+       struct hci_uart *hu = tty->disc_data;
        int err = 0;
 
        BT_DBG("");
@@ -577,19 +621,19 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
  * We don't provide read/write/poll interface for user space.
  */
 static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
-                                       unsigned char __user *buf, size_t nr)
+                                unsigned char __user *buf, size_t nr)
 {
        return 0;
 }
 
 static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
-                                       const unsigned char *data, size_t count)
+                                 const unsigned char *data, size_t count)
 {
        return 0;
 }
 
 static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
-                                       struct file *filp, poll_table *wait)
+                                     struct file *filp, poll_table *wait)
 {
        return 0;
 }
@@ -637,6 +681,9 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_3WIRE
        h5_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_BCM
+       bcm_init();
+#endif
 
        return 0;
 }
@@ -660,6 +707,9 @@ static void __exit hci_uart_exit(void)
 #ifdef CONFIG_BT_HCIUART_3WIRE
        h5_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_BCM
+       bcm_deinit();
+#endif
 
        /* Release tty registration of line discipline */
        err = tty_unregister_ldisc(N_HCI);
index 69a90b1b5ff56131aa7a2e51b7d18aabba28b560..9ee24b075f7973e7578043c74e7421ddd535b7b5 100644 (file)
@@ -370,10 +370,10 @@ static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll,
 }
 
 /* Recv data */
-static int ll_recv(struct hci_uart *hu, void *data, int count)
+static int ll_recv(struct hci_uart *hu, const void *data, int count)
 {
        struct ll_struct *ll = hu->priv;
-       char *ptr;
+       const char *ptr;
        struct hci_event_hdr *eh;
        struct hci_acl_hdr   *ah;
        struct hci_sco_hdr   *sh;
@@ -505,8 +505,9 @@ static struct sk_buff *ll_dequeue(struct hci_uart *hu)
        return skb_dequeue(&ll->txq);
 }
 
-static struct hci_uart_proto llp = {
+static const struct hci_uart_proto llp = {
        .id             = HCI_UART_LL,
+       .name           = "LL",
        .open           = ll_open,
        .close          = ll_close,
        .recv           = ll_recv,
@@ -517,14 +518,7 @@ static struct hci_uart_proto llp = {
 
 int __init ll_init(void)
 {
-       int err = hci_uart_register_proto(&llp);
-
-       if (!err)
-               BT_INFO("HCILL protocol initialized");
-       else
-               BT_ERR("HCILL protocol registration failed");
-
-       return err;
+       return hci_uart_register_proto(&llp);
 }
 
 int __exit ll_deinit(void)
index 074ed29092b487b700838b34f1504e8379335dd9..72120a5ba13c8a6056649a7ec51a617eac8cf536 100644 (file)
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS                _IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO     6
+#define HCI_UART_MAX_PROTO     8
 
 #define HCI_UART_H4    0
 #define HCI_UART_BCSP  1
 #define HCI_UART_H4DS  3
 #define HCI_UART_LL    4
 #define HCI_UART_ATH3K 5
+#define HCI_UART_INTEL 6
+#define HCI_UART_BCM   7
 
 #define HCI_UART_RAW_DEVICE    0
 #define HCI_UART_RESET_ON_INIT 1
 #define HCI_UART_CREATE_AMP    2
 #define HCI_UART_INIT_PENDING  3
 #define HCI_UART_EXT_CONFIG    4
+#define HCI_UART_VND_DETECT    5
 
 struct hci_uart;
 
 struct hci_uart_proto {
        unsigned int id;
+       const char *name;
        int (*open)(struct hci_uart *hu);
        int (*close)(struct hci_uart *hu);
        int (*flush)(struct hci_uart *hu);
-       int (*recv)(struct hci_uart *hu, void *data, int len);
-       int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
        int (*setup)(struct hci_uart *hu);
+       int (*recv)(struct hci_uart *hu, const void *data, int len);
+       int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
        struct sk_buff *(*dequeue)(struct hci_uart *hu);
 };
 
@@ -72,7 +76,7 @@ struct hci_uart {
        struct work_struct      init_ready;
        struct work_struct      write_work;
 
-       struct hci_uart_proto   *proto;
+       const struct hci_uart_proto *proto;
        void                    *priv;
 
        struct sk_buff          *tx_skb;
@@ -88,14 +92,48 @@ struct hci_uart {
 #define HCI_UART_SENDING       1
 #define HCI_UART_TX_WAKEUP     2
 
-int hci_uart_register_proto(struct hci_uart_proto *p);
-int hci_uart_unregister_proto(struct hci_uart_proto *p);
+int hci_uart_register_proto(const struct hci_uart_proto *p);
+int hci_uart_unregister_proto(const struct hci_uart_proto *p);
 int hci_uart_tx_wakeup(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 
 #ifdef CONFIG_BT_HCIUART_H4
 int h4_init(void);
 int h4_deinit(void);
+
+struct h4_recv_pkt {
+       u8  type;       /* Packet type */
+       u8  hlen;       /* Header length */
+       u8  loff;       /* Data length offset in header */
+       u8  lsize;      /* Data length field size */
+       u16 maxlen;     /* Max overall packet length */
+       int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
+};
+
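+/* Initializer fragments for the common H:4 packet types; UART drivers
+ * combine these in the h4_recv_pkt tables passed to h4_recv_buf().
+ */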
+#define H4_RECV_ACL \
+       .type = HCI_ACLDATA_PKT, \
+       .hlen = HCI_ACL_HDR_SIZE, \
+       .loff = 2, \
+       .lsize = 2, \
+       .maxlen = HCI_MAX_FRAME_SIZE \
+
+#define H4_RECV_SCO \
+       .type = HCI_SCODATA_PKT, \
+       .hlen = HCI_SCO_HDR_SIZE, \
+       .loff = 2, \
+       .lsize = 1, \
+       .maxlen = HCI_MAX_SCO_SIZE
+
+#define H4_RECV_EVENT \
+       .type = HCI_EVENT_PKT, \
+       .hlen = HCI_EVENT_HDR_SIZE, \
+       .loff = 1, \
+       .lsize = 1, \
+       .maxlen = HCI_MAX_EVENT_SIZE
+
+struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+                           const unsigned char *buffer, int count,
+                           const struct h4_recv_pkt *pkts, int pkts_count);
 #endif
 
 #ifdef CONFIG_BT_HCIUART_BCSP
@@ -117,3 +155,8 @@ int ath_deinit(void);
 int h5_init(void);
 int h5_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_BCM
+int bcm_init(void);
+int bcm_deinit(void);
+#endif
index 68161f7a07d6c8bef677fdd04bb2a1b20715a664..a0b036ccb118b1fc23fa07907f17196c80a15a54 100644 (file)
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
        bool "Renesas CMT timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        default SYS_SUPPORTS_SH_CMT
        help
          This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
        bool "Renesas MTU2 timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        default SYS_SUPPORTS_SH_MTU2
        help
          This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
        bool "Renesas TMU timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        default SYS_SUPPORTS_SH_TMU
        help
          This enables build of a clocksource and clockevent driver for
index 5dcbf90b8015ce40787d2acf27a53a142d4aed8d..58597fbcc046f27f88238aa949730d6109a704b7 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
        .dev_id = &sun5i_clockevent,
 };
 
-static u64 sun5i_timer_sched_read(void)
-{
-       return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
        struct reset_control *rstc;
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
        writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
               timer_base + TIMER_CTL_REG(1));
 
-       sched_clock_register(sun5i_timer_sched_read, 32, rate);
        clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
                              rate, 340, 32, clocksource_mmio_readl_down);
 
index 0723096fb50ac125dbb471126ba396085307ff2e..c92d6a70ccf303c69cfdb127210a09a1262122bc 100644 (file)
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
+               bcm2835_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c->chan_base);
 
index 4527a3ebeac446f58a4c3b3b8722d6caf16b1b63..84884418fd30fc73a700bde9c0bebd67d72ea0a5 100644 (file)
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
        kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
 }
 
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int jz4740_dma_probe(struct platform_device *pdev)
 {
        struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
        dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
        dd->device_config = jz4740_dma_slave_config;
        dd->device_terminate_all = jz4740_dma_terminate_all;
+       dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+       dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);
 
index 276157f22612dc18140b84294ab1ef49ecf46ebc..53dbd3b3384cfd8b00940f7a7cd752b28bcac71a 100644 (file)
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
         */
        if (echan->edesc) {
                int cyclic = echan->edesc->cyclic;
+
+               /*
+                * free the running request descriptor
+                * since it is not in any of the vdesc lists
+                */
+               edma_desc_free(&echan->edesc->vdesc);
+
                echan->edesc = NULL;
                edma_stop(echan->ch_num);
                /* Move the cyclic channel back to default queue */
index 15cab7d79525914d862e5cd38344bc2e3046c95a..b4634109e0100905dd39d285f03d669c405cbef0 100644 (file)
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
 
        spin_lock_irqsave(&ch->vc.lock, flags);
 
-       if (ch->desc)
+       if (ch->desc) {
+               moxart_dma_desc_free(&ch->desc->vd);
                ch->desc = NULL;
+       }
 
        ctrl = readl(ch->base + REG_OFF_CTRL);
        ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
index 7dd6dd1216819543aae06f2d3dd2707d16917c24..167dbaf6574275a0fffd5590b7a169e811320b39 100644 (file)
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
+               omap_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
index 69fac068669fde566f41013cefbdf48db023466c..2eebd28b4c40af2789c32e0008f2b60006fc03ac 100644 (file)
@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
        int i = 0;
 
        /*
-        *      Stop when we see all the items the table claimed to have
-        *      OR we run off the end of the table (also happens)
+        * Stop when we have seen all the items the table claimed to have
+        * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+        * off the end of the table (should never happen but sometimes does
+        * on bogus implementations.)
         */
-       while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+       while ((!num || i < num) &&
+              (data - buf + sizeof(struct dmi_header)) <= len) {
                const struct dmi_header *dm = (const struct dmi_header *)data;
 
                /*
@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
        if (memcmp(buf, "_SM3_", 5) == 0 &&
            buf[6] < 32 && dmi_checksum(buf, buf[6])) {
                dmi_ver = get_unaligned_be16(buf + 7);
+               dmi_num = 0;                    /* No longer specified */
                dmi_len = get_unaligned_le32(buf + 12);
                dmi_base = get_unaligned_le64(buf + 16);
 
-               /*
-                * The 64-bit SMBIOS 3.0 entry point no longer has a field
-                * containing the number of structures present in the table.
-                * Instead, it defines the table size as a maximum size, and
-                * relies on the end-of-table structure type (#127) to be used
-                * to signal the end of the table.
-                * So let's define dmi_num as an upper bound as well: each
-                * structure has a 4 byte header, so dmi_len / 4 is an upper
-                * bound for the number of structures in the table.
-                */
-               dmi_num = dmi_len / 4;
-
                if (dmi_walk_early(dmi_decode) == 0) {
                        pr_info("SMBIOS %d.%d present.\n",
                                dmi_ver >> 8, dmi_ver & 0xFF);
index a6952ba343a89747b919b45aa4d9e07951762fcd..a65b75161aa49d16749258407db18c7cfc30f79f 100644 (file)
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+static struct of_device_id mpc8xxx_gpio_ids[] = {
        { .compatible = "fsl,mpc8349-gpio", },
        { .compatible = "fsl,mpc8572-gpio", },
        { .compatible = "fsl,mpc8610-gpio", },
index 257e2989215c035b87fbef2f7b086c58b722e266..045a952576c708e253de29438abaec95640989f2 100644 (file)
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
                ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
                                                 &priv->dir_reg_offset);
                if (ret)
-                       dev_err(dev, "can't read the dir register offset!\n");
+                       dev_dbg(dev, "can't read the dir register offset!\n");
 
                priv->dir_reg_offset <<= 3;
        }
index c0929d938ced866e343e0230e00fc5c9cda77c0b..df990f29757a7e045fd8a42760944b2d8bd2ba84 100644 (file)
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        if (!handler)
                return AE_BAD_PARAMETER;
 
+       pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+       if (pin < 0)
+               return AE_BAD_PARAMETER;
+
        desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
        if (IS_ERR(desc)) {
                dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                struct gpio_desc *desc;
                bool found;
 
+               pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+               if (pin < 0) {
+                       status = AE_BAD_PARAMETER;
+                       goto out;
+               }
+
                mutex_lock(&achip->conn_lock);
 
                found = false;
index f6d04c7b5115a965bce68ad265080a46fcad3a6b..679b10e34fb545f23c827f9699bfef1c9f4268bd 100644 (file)
@@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
 }
 EXPORT_SYMBOL(drm_framebuffer_reference);
 
-static void drm_framebuffer_free_bug(struct kref *kref)
-{
-       BUG();
-}
-
-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-       DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
-       kref_put(&fb->refcount, drm_framebuffer_free_bug);
-}
-
 /**
  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
  * @fb: fb to unregister
@@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
                return;
        }
        /* disconnect the plane from the fb and crtc: */
-       __drm_framebuffer_unreference(plane->old_fb);
+       drm_framebuffer_unreference(plane->old_fb);
        plane->old_fb = NULL;
        plane->fb = NULL;
        plane->crtc = NULL;
index 732cb6f8e653f58dee7102f0bb11b797cde2f5ad..4c0aa97aaf0399a111fe0d63be6d996bafbfd7f3 100644 (file)
@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
+       drm_edid_to_eld(connector, edid);
        kfree(edid);
 
        return ret;
index 6591d48c1b9d0f3bcc4a74a8da730833cbc4e839..3fee587bc284ebffc7b2050c69034cba9c1949da 100644 (file)
@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
                        struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
                        count = drm_add_edid_modes(connector, edid);
+                       drm_edid_to_eld(connector, edid);
                } else
                        count = (*connector_funcs->get_modes)(connector);
        }
index c300e22da8ac5f2f4294c27543e62291ee0a8739..33a10ce967eacdfbcbc679a8645baae4515b4f1b 100644 (file)
@@ -147,6 +147,7 @@ struct fimd_win_data {
        unsigned int            ovl_height;
        unsigned int            fb_width;
        unsigned int            fb_height;
+       unsigned int            fb_pitch;
        unsigned int            bpp;
        unsigned int            pixel_format;
        dma_addr_t              dma_addr;
@@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
        win_data->offset_y = plane->crtc_y;
        win_data->ovl_width = plane->crtc_width;
        win_data->ovl_height = plane->crtc_height;
+       win_data->fb_pitch = plane->pitch;
        win_data->fb_width = plane->fb_width;
        win_data->fb_height = plane->fb_height;
        win_data->dma_addr = plane->dma_addr[0] + offset;
        win_data->bpp = plane->bpp;
        win_data->pixel_format = plane->pixel_format;
-       win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
-                               (plane->bpp >> 3);
+       win_data->buf_offsize =
+               plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
        win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
 
        DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
        writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
        /* buffer end address */
-       size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+       size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
        val = (unsigned long)(win_data->dma_addr + size);
        writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
index 3518bc4654c5c9381acdb8d7253395e0466c8adc..2e3bc57ea50e594dcdf739042c39169428ecbda1 100644 (file)
@@ -55,6 +55,7 @@ struct hdmi_win_data {
        unsigned int            fb_x;
        unsigned int            fb_y;
        unsigned int            fb_width;
+       unsigned int            fb_pitch;
        unsigned int            fb_height;
        unsigned int            src_width;
        unsigned int            src_height;
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        } else {
                luma_addr[0] = win_data->dma_addr;
                chroma_addr[0] = win_data->dma_addr
-                       + (win_data->fb_width * win_data->fb_height);
+                       + (win_data->fb_pitch * win_data->fb_height);
        }
 
        if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
                        luma_addr[1] = luma_addr[0] + 0x40;
                        chroma_addr[1] = chroma_addr[0] + 0x40;
                } else {
-                       luma_addr[1] = luma_addr[0] + win_data->fb_width;
-                       chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+                       luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
+                       chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
                }
        } else {
                ctx->interlace = false;
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
        /* setting size of input image */
-       vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+       vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
                VP_IMG_VSIZE(win_data->fb_height));
        /* chroma height has to reduced by 2 to avoid chroma distorions */
-       vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+       vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
                VP_IMG_VSIZE(win_data->fb_height / 2));
 
        vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
        /* converting dma address base and source offset */
        dma_addr = win_data->dma_addr
                + (win_data->fb_x * win_data->bpp >> 3)
-               + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+               + (win_data->fb_y * win_data->fb_pitch);
        src_x_offset = 0;
        src_y_offset = 0;
 
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
                MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
 
        /* setup geometry */
-       mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+       mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+                       win_data->fb_pitch / (win_data->bpp >> 3));
 
        /* setup display size */
        if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
        win_data->fb_y = plane->fb_y;
        win_data->fb_width = plane->fb_width;
        win_data->fb_height = plane->fb_height;
+       win_data->fb_pitch = plane->pitch;
        win_data->src_width = plane->src_width;
        win_data->src_height = plane->src_height;
 
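
The exynos mixer hunks above switch the plane address math from fb_width (visible pixels per line) to fb_pitch (bytes per allocated scanline, including any row padding the allocator added). A minimal standalone sketch of the difference; the framebuffer geometry below is hypothetical, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 1366x768 XRGB8888 framebuffer, rows padded to 64 bytes */
	unsigned int fb_width = 1366;      /* visible pixels per line */
	unsigned int bpp      = 32;        /* bits per pixel */
	unsigned int fb_pitch = 5504;      /* allocated bytes per line (1366 * 4 = 5464, rounded up) */
	unsigned int fb_x = 10, fb_y = 20; /* plane offset inside the framebuffer */
	uint64_t dma_addr = 0x40000000;    /* hypothetical buffer base */

	/* wrong: assumes a row is exactly fb_width pixels of payload */
	uint64_t off_width = (uint64_t)fb_x * (bpp >> 3)
			   + (uint64_t)fb_y * fb_width * (bpp >> 3);
	/* right: step down rows by the real allocated stride */
	uint64_t off_pitch = (uint64_t)fb_x * (bpp >> 3)
			   + (uint64_t)fb_y * fb_pitch;

	printf("width-based address: 0x%llx\n",
	       (unsigned long long)(dma_addr + off_width));
	printf("pitch-based address: 0x%llx\n",
	       (unsigned long long)(dma_addr + off_pitch));
	return 0;
}

With padded rows the two results diverge, which is exactly the misaddressing the fb_pitch conversion avoids.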
index 5b205863b6596d7fa72e6f465a017d9bcb204e78..27ea6bdebce761dbd8dca5340a4cd6e2bd32eaa3 100644 (file)
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
        WARN_ON(i915_verify_lists(ring->dev));
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate,
-        * before we free the context associated with the requests.
+       /* Retire requests first as we use it above for the early return.
+        * If we retire requests last, we may use a later seqno and so clear
+        * the requests lists without clearing the active list, leading to
+        * confusion.
         */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_gem_request_completed(obj->last_read_req, true))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
-
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
                struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                i915_gem_free_request(request);
        }
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_gem_request_completed(obj->last_read_req, true))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
        if (unlikely(ring->trace_irq_req &&
                     i915_gem_request_completed(ring->trace_irq_req, true))) {
                ring->irq_put(ring);
index b773368fc62c8ac67717f6770ce20fd642a1bc8c..38a742532c4fa48ba2798d6aec910b546839090f 100644 (file)
@@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
 
-       if (i915_needs_cmd_parser(ring)) {
+       if (i915_needs_cmd_parser(ring) && args->batch_len) {
                batch_obj = i915_gem_execbuffer_parse(ring,
                                                      &shadow_exec_entry,
                                                      eb,
index 6d22128d97b1b8ce732208fe93ed897dc0a2cdcd..f75173c20f47677f1a8462dabc78b10ac7b4645f 100644 (file)
@@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
        if (!intel_crtc->base.primary->fb)
                return;
 
-       if (intel_alloc_plane_obj(intel_crtc, plane_config))
+       if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+               struct drm_plane *primary = intel_crtc->base.primary;
+
+               primary->state->crtc = &intel_crtc->base;
+               primary->crtc = &intel_crtc->base;
+               update_state_fb(primary);
+
                return;
+       }
 
        kfree(intel_crtc->base.primary->fb);
        intel_crtc->base.primary->fb = NULL;
@@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
                        continue;
 
                if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+                       struct drm_plane *primary = intel_crtc->base.primary;
+
                        if (obj->tiling_mode != I915_TILING_NONE)
                                dev_priv->preserve_bios_swizzle = true;
 
                        drm_framebuffer_reference(c->primary->fb);
-                       intel_crtc->base.primary->fb = c->primary->fb;
+                       primary->fb = c->primary->fb;
+                       primary->state->crtc = &intel_crtc->base;
+                       primary->crtc = &intel_crtc->base;
                        obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
                        break;
                }
@@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                      plane_config->size);
 
        crtc->base.primary->fb = fb;
-       update_state_fb(crtc->base.primary);
 }
 
 static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                      plane_config->size);
 
        crtc->base.primary->fb = fb;
-       update_state_fb(crtc->base.primary);
        return;
 
 error:
@@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
                      plane_config->size);
 
        crtc->base.primary->fb = fb;
-       update_state_fb(crtc->base.primary);
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
index 0a52c44ad03d6b21078fe7482ddc3b84813ac9c4..9c5451c97942801544bbe9a0e692810bb5a9444e 100644 (file)
@@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
        drm_modeset_lock_all(dev);
 
        plane = drm_plane_find(dev, set->plane_id);
-       if (!plane) {
+       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
                ret = -ENOENT;
                goto out_unlock;
        }
@@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
        drm_modeset_lock_all(dev);
 
        plane = drm_plane_find(dev, get->plane_id);
-       if (!plane) {
+       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
                ret = -ENOENT;
                goto out_unlock;
        }
index c648e1996dabac449dfb838e018cad85b2d3bb61..243a36c93b8f96c0f268ef9bfcae1721043c7240 100644 (file)
 #define VCE_UENC_REG_CLOCK_GATING      0x207c0
 #define VCE_SYS_INT_EN                 0x21300
 #      define VCE_SYS_INT_TRAP_INTERRUPT_EN    (1 << 3)
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR   0x2145c
 #define VCE_LMI_CTRL2                  0x21474
 #define VCE_LMI_CTRL                   0x21498
 #define VCE_LMI_VM_CTRL                        0x214a0
index 5587603b4a891c1f2cfcf7873dd8aa9e173907df..33d5a4f4eebdc21118b733957a2e1f75230a55cb 100644 (file)
@@ -1565,6 +1565,7 @@ struct radeon_dpm {
        int                     new_active_crtc_count;
        u32                     current_active_crtcs;
        int                     current_active_crtc_count;
+       bool single_display;
        struct radeon_dpm_dynamic_state dyn_state;
        struct radeon_dpm_fan fan;
        u32 tdp_limit;
index 63ccb8fa799c209bc82db257d3da0c8fc60ba052..d27e4ccb848c9c60e8a14f71336b790b125d2231 100644 (file)
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
 
 static bool radeon_read_bios(struct radeon_device *rdev)
 {
-       uint8_t __iomem *bios;
+       uint8_t __iomem *bios, val1, val2;
        size_t size;
 
        rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
                return false;
        }
 
-       if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+       val1 = readb(&bios[0]);
+       val2 = readb(&bios[1]);
+
+       if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
                pci_unmap_rom(rdev->pdev, bios);
                return false;
        }
-       rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+       rdev->bios = kzalloc(size, GFP_KERNEL);
        if (rdev->bios == NULL) {
                pci_unmap_rom(rdev->pdev, bios);
                return false;
        }
+       memcpy_fromio(rdev->bios, bios, size);
        pci_unmap_rom(rdev->pdev, bios);
        return true;
 }
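
The radeon_read_bios() change above is the standard fix for touching a PCI ROM mapping: an __iomem pointer must be read through readb()/memcpy_fromio() rather than dereferenced directly or handed to kmemdup(). A hedged sketch of the same pattern with a hypothetical helper name; error handling is trimmed:

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* hypothetical helper: copy an expansion ROM out of its __iomem mapping */
static void *copy_pci_rom(struct pci_dev *pdev, size_t *out_size)
{
	void __iomem *rom;
	size_t size;
	void *copy;

	rom = pci_map_rom(pdev, &size);
	if (!rom || size == 0)
		return NULL;

	/* check the 0x55 0xAA ROM signature via readb(), never rom[0]/rom[1] */
	if (readb(rom) != 0x55 || readb(rom + 1) != 0xaa) {
		pci_unmap_rom(pdev, rom);
		return NULL;
	}

	copy = kzalloc(size, GFP_KERNEL);
	if (copy) {
		memcpy_fromio(copy, rom, size); /* bulk copy through the io accessors */
		*out_size = size;
	}

	pci_unmap_rom(pdev, rom);
	return copy;
}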
index a69bd441dd2d0cc612b9fa4d9e7711934270e3c5..572b4dbec186a9d59e8066782773c86acdbfa46b 100644 (file)
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
                struct radeon_bo *bo;
-               struct fence *fence;
                int r;
 
                bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                        continue;
                }
 
-               fence = reservation_object_get_excl(bo->tbo.resv);
-               if (fence) {
-                       r = radeon_fence_wait((struct radeon_fence *)fence, false);
-                       if (r)
-                               DRM_ERROR("(%d) failed to wait for user bo\n", r);
-               }
+               r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
+                       false, MAX_SCHEDULE_TIMEOUT);
+               if (r)
+                       DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
index 33cf4108386dbba4ef70a0e372eb992d1ff7e4d3..c1ba83a8dd8c9333829aaa5f1384f65c7103d238 100644 (file)
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
        radeon_pm_compute_clocks(rdev);
 }
 
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
-                                                    enum radeon_pm_state_type dpm_state)
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
 {
-       int i;
-       struct radeon_ps *ps;
-       u32 ui_class;
        bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
                true : false;
 
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
        if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
                single_display = false;
 
+       return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+                                                    enum radeon_pm_state_type dpm_state)
+{
+       int i;
+       struct radeon_ps *ps;
+       u32 ui_class;
+       bool single_display = radeon_dpm_single_display(rdev);
+
        /* certain older asics have a separate 3D performance state,
         * so try that first if the user selected performance
         */
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
        struct radeon_ps *ps;
        enum radeon_pm_state_type dpm_state;
        int ret;
+       bool single_display = radeon_dpm_single_display(rdev);
 
        /* if dpm init failed */
        if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
                /* vce just modifies an existing state so force a change */
                if (ps->vce_active != rdev->pm.dpm.vce_active)
                        goto force;
+               /* user has made a display change (such as timing) */
+               if (rdev->pm.dpm.single_display != single_display)
+                       goto force;
                if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
                        /* for pre-BTC and APUs if the num crtcs changed but state is the same,
                         * all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
 
        rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
        rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+       rdev->pm.dpm.single_display = single_display;
 
        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
index 2456f69efd2310233fac5c75a314a9060abe1a17..8c7872339c2a6f5e94bf601c47ed421e5caee352 100644 (file)
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
 
-       if (!ring->ready)
+       if (!ring->ring)
                return 0;
 
        /* print 8 dw before current rptr as often it's the last executed
index d02aa1d0f5885408c877056bd4ac1ab0e1ed6f12..b292aca0f342d53856ec3eaf982b71fd8b0a7fa8 100644 (file)
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
+       /* double check that we don't free the table twice */
+       if (!ttm->sg->sgl)
+               return;
+
        /* free the sg table and pages again */
        dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
index 1ac7bb825a1b3bfecea601d76f8e33c2b8d581ff..fbbe78fbd087ae7c147a43b925f0ac0401c86466 100644 (file)
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
        WREG32(VCE_LMI_SWAP_CNTL1, 0);
        WREG32(VCE_LMI_VM_CTRL, 0);
 
+       WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+       addr &= 0xff;
        size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
        WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
        WREG32(VCE_VCPU_CACHE_SIZE0, size);
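
On the VCE_LMI_VCPU_CACHE_40BIT_BAR write above: the shift suggests the register takes the firmware address in 256-byte units, leaving only the low 8 bits to feed the cache offset registers. That interpretation is an assumption drawn from the code, not from documentation in this diff. A quick standalone check that the split is lossless, using a made-up GPU address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000001234567980ULL;     /* hypothetical VCE firmware GPU address */

	uint32_t bar    = (uint32_t)(addr >> 8);   /* upper bits, 256-byte granularity */
	uint32_t offset = (uint32_t)(addr & 0xff); /* low 8 bits kept for the offset regs */

	/* ((uint64_t)bar << 8) | offset reconstructs the original address */
	printf("40BIT_BAR = 0x%08x, low bits = 0x%02x, rebuilt = 0x%llx\n",
	       bar, offset, (unsigned long long)(((uint64_t)bar << 8) | offset));
	return 0;
}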
index 1096da327130526080139e628fdd0a0130d32861..75c6d2103e07adad3b11919687e81f8dd7a8fca3 100644 (file)
@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = bma180_get_data_reg(data, bit);
                if (ret < 0) {
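
This bma180 hunk is the first of several in this merge (bmc150, kxcjk1013, at91, ti_am335x, bmg160, kmx61, sx9500) that make trigger handlers iterate indio_dev->active_scan_mask instead of indio_dev->buffer->scan_mask: the active mask is the set of channels the IIO core actually enabled after matching against available_scan_masks, so it is the one a pollfunc must honour. A hedged sketch of the idiom with a made-up driver; only the mask handling is the point here:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

struct demo_data {                     /* hypothetical driver state */
	struct mutex lock;
	s16 buf[8];                    /* samples plus room for the timestamp */
	int (*read_axis)(struct demo_data *data, int chan);
};

static irqreturn_t demo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct demo_data *data = iio_priv(indio_dev);
	int bit, i = 0;

	mutex_lock(&data->lock);
	/* walk only the channels the core actually enabled */
	for_each_set_bit(bit, indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		int ret = data->read_axis(data, bit);

		if (ret < 0)
			break;
		data->buf[i++] = ret;
	}
	mutex_unlock(&data->lock);

	iio_push_to_buffers_with_timestamp(indio_dev, data->buf, pf->timestamp);
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}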
index 066d0c04072c69943fa21313fb47f06c72fedcea..75567fd457dcc4b9bd7c147fdc4cb229cdbaf0c4 100644 (file)
@@ -168,14 +168,14 @@ static const struct {
        int val;
        int val2;
        u8 bw_bits;
-} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
-                                    {15, 630000, 0x09},
-                                    {31, 250000, 0x0A},
-                                    {62, 500000, 0x0B},
-                                    {125, 0, 0x0C},
-                                    {250, 0, 0x0D},
-                                    {500, 0, 0x0E},
-                                    {1000, 0, 0x0F} };
+} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
+                                    {31, 260000, 0x09},
+                                    {62, 500000, 0x0A},
+                                    {125, 0, 0x0B},
+                                    {250, 0, 0x0C},
+                                    {500, 0, 0x0D},
+                                    {1000, 0, 0x0E},
+                                    {2000, 0, 0x0F} };
 
 static const struct {
        int bw_bits;
@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
 }
 
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
-               "7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
+               "15.620000 31.260000 62.50000 125 250 500 1000 2000");
 
 static struct attribute *bmc150_accel_attributes[] = {
        &iio_const_attr_sampling_frequency_available.dev_attr.attr,
@@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = i2c_smbus_read_word_data(data->client,
                                               BMC150_ACCEL_AXIS_TO_REG(bit));
index 567de269cc00650191541a98ac8d5818d0ea8661..1a6379525fa47e73497b17866be4276fc88c8065 100644 (file)
@@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = kxcjk1013_get_acc_reg(data, bit);
                if (ret < 0) {
index 202daf889be276315d24575b1301349f295fe4c8..46379b1fb25b59b10018a121b2cc8dc78082d4a6 100644 (file)
@@ -137,7 +137,8 @@ config AXP288_ADC
 
 config CC10001_ADC
        tristate "Cosmic Circuits 10001 ADC driver"
-       depends on HAS_IOMEM || HAVE_CLK || REGULATOR
+       depends on HAVE_CLK || REGULATOR
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index ff61ae55dd3ff8ac73c0925c0b88fa6a75a1083d..8a0eb4a04fb55b9cb2436db5b16f678f654b8a26 100644 (file)
@@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 {
        struct iio_dev *idev = iio_trigger_get_drvdata(trig);
        struct at91_adc_state *st = iio_priv(idev);
-       struct iio_buffer *buffer = idev->buffer;
        struct at91_adc_reg_desc *reg = st->registers;
        u32 status = at91_adc_readl(st, reg->trigger_register);
        int value;
@@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                at91_adc_writel(st, reg->trigger_register,
                                status | value);
 
-               for_each_set_bit(bit, buffer->scan_mask,
+               for_each_set_bit(bit, idev->active_scan_mask,
                                 st->num_channels) {
                        struct iio_chan_spec const *chan = idev->channels + bit;
                        at91_adc_writel(st, AT91_ADC_CHER,
@@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                at91_adc_writel(st, reg->trigger_register,
                                status & ~value);
 
-               for_each_set_bit(bit, buffer->scan_mask,
+               for_each_set_bit(bit, idev->active_scan_mask,
                                 st->num_channels) {
                        struct iio_chan_spec const *chan = idev->channels + bit;
                        at91_adc_writel(st, AT91_ADC_CHDR,
index 2e5cc4409f78884e82f309c729febabe3ff7f982..a0e7161f040c91daca74a469d27ff641be9ea915 100644 (file)
@@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
 static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
-       struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int enb = 0;
        u8 bit;
 
        tiadc_step_config(indio_dev);
-       for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
+       for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
                enb |= (get_adc_step_bit(adc_dev, bit) << 1);
        adc_dev->buffer_en_ch_steps = enb;
 
index 8ec353c01d98e02e7074df9d4d295ec19f772f09..e63b8e76d4c3d54edc25d23561f28a11afeb05e7 100644 (file)
@@ -141,9 +141,13 @@ struct vf610_adc {
        struct regulator *vref;
        struct vf610_adc_feature adc_feature;
 
+       u32 sample_freq_avail[5];
+
        struct completion completion;
 };
 
+static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+
 #define VF610_ADC_CHAN(_idx, _chan_type) {                     \
        .type = (_chan_type),                                   \
        .indexed = 1,                                           \
@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
        /* sentinel */
 };
 
-/*
- * ADC sample frequency, unit is ADCK cycles.
- * ADC clk source is ipg clock, which is the same as bus clock.
- *
- * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
- * SFCAdder: fixed to 6 ADCK cycles
- * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
- * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
- * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
- *
- * By default, enable 12 bit resolution mode, clock source
- * set to ipg clock, So get below frequency group:
- */
-static const u32 vf610_sample_freq_avail[5] =
-{1941176, 559332, 286957, 145374, 73171};
+static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
+{
+       unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
+       int i;
+
+       /*
+        * Calculate ADC sample frequencies
+        * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
+        * which is the same as bus clock.
+        *
+        * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+        * SFCAdder: fixed to 6 ADCK cycles
+        * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+        * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+        * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+        */
+       adck_rate = ipg_rate / info->adc_feature.clk_div;
+       for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
+               info->sample_freq_avail[i] =
+                       adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
+}
 
 static inline void vf610_adc_cfg_init(struct vf610_adc *info)
 {
+       struct vf610_adc_feature *adc_feature = &info->adc_feature;
+
        /* set default Configuration for ADC controller */
-       info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
-       info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+       adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
+       adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+       adc_feature->calibration = true;
+       adc_feature->ovwren = true;
+
+       adc_feature->res_mode = 12;
+       adc_feature->sample_rate = 1;
+       adc_feature->lpm = true;
 
-       info->adc_feature.calibration = true;
-       info->adc_feature.ovwren = true;
+       /* Use a safe ADCK frequency that stays below 20MHz on all devices */
+       adc_feature->clk_div = 8;
 
-       info->adc_feature.clk_div = 1;
-       info->adc_feature.res_mode = 12;
-       info->adc_feature.sample_rate = 1;
-       info->adc_feature.lpm = true;
+       vf610_adc_calculate_rates(info);
 }
 
 static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
 
        cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
 
-       /* low power configuration */
        cfg_data &= ~VF610_ADC_ADLPC_EN;
        if (adc_feature->lpm)
                cfg_data |= VF610_ADC_ADLPC_EN;
 
-       /* disable high speed */
        cfg_data &= ~VF610_ADC_ADHSC_EN;
 
        writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+static ssize_t vf610_show_samp_freq_avail(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
+       size_t len = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
+               len += scnprintf(buf + len, PAGE_SIZE - len,
+                       "%u ", info->sample_freq_avail[i]);
+
+       /* replace trailing space by newline */
+       buf[len - 1] = '\n';
+
+       return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
 
 static struct attribute *vf610_attributes[] = {
-       &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+       &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
        NULL
 };
 
@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_FRACTIONAL_LOG2;
 
        case IIO_CHAN_INFO_SAMP_FREQ:
-               *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+               *val = info->sample_freq_avail[info->adc_feature.sample_rate];
                *val2 = 0;
                return IIO_VAL_INT;
 
@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
        switch (mask) {
                case IIO_CHAN_INFO_SAMP_FREQ:
                        for (i = 0;
-                               i < ARRAY_SIZE(vf610_sample_freq_avail);
+                               i < ARRAY_SIZE(info->sample_freq_avail);
                                i++)
-                               if (val == vf610_sample_freq_avail[i]) {
+                               if (val == info->sample_freq_avail[i]) {
                                        info->adc_feature.sample_rate = i;
                                        vf610_adc_sample_set(info);
                                        return 0;
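
The vf610 hunks above replace a hard-coded sample-rate table with a calculation from the ipg clock, using conversion time = SFCAdder(6) + AverageNum x (BCT(25) + LSTAdder(3)) ADCK cycles. A standalone recomputation with the old default divider; the 66 MHz ipg rate is an assumption for illustration, and it closely reproduces the removed table (1941176, 559332, 286957, 145374, 73171; the small differences come down to rounding):

#include <stdio.h>

int main(void)
{
	/* assumed ipg/bus clock; the driver reads the real rate via clk_get_rate() */
	unsigned long ipg_rate  = 66000000UL;
	unsigned long adck_rate = ipg_rate / 1;  /* old default clk_div = 1 */
	const unsigned int hw_avgs[] = { 1, 4, 8, 16, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(hw_avgs) / sizeof(hw_avgs[0]); i++)
		printf("avg %2u -> %lu Hz\n", hw_avgs[i],
		       adck_rate / (6 + hw_avgs[i] * (25 + 3)));

	return 0;
}

With the new default clk_div = 8, the same loop yields the rates the patch now exposes through sampling_frequency_available.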
index 60451b32824212a039b8ac9358751faa1af12819..ccf3ea7e1afa8ca1848937b9a8b92e1f2cefc8be 100644 (file)
@@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = i2c_smbus_read_word_data(data->client,
                                               BMG160_AXIS_TO_REG(bit));
index e0017c22bb9c6ce3b4f4f9c753a8ce1364621b19..f53e9a803a0e1ec1589a5b0ad25d7aa1f8d54e70 100644 (file)
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
        iio_trigger_set_drvdata(adis->trig, adis);
        ret = iio_trigger_register(adis->trig);
 
-       indio_dev->trig = adis->trig;
+       indio_dev->trig = iio_trigger_get(adis->trig);
        if (ret)
                goto error_free_irq;
 
index d8d5bed65e072cae577968edb78e2e592c2a5bfa..ef76afe2643cb0bebe512124ca8c9326e09229c2 100644 (file)
@@ -410,42 +410,46 @@ error_read_raw:
        }
 }
 
-static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
 {
-       int result;
+       int result, i;
        u8 d;
 
-       if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
-               return -EINVAL;
-       if (fsr == st->chip_config.fsr)
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+               if (gyro_scale_6050[i] == val) {
+                       d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+                       result = inv_mpu6050_write_reg(st,
+                                       st->reg->gyro_config, d);
+                       if (result)
+                               return result;
 
-       d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
-       result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
-       if (result)
-               return result;
-       st->chip_config.fsr = fsr;
+                       st->chip_config.fsr = i;
+                       return 0;
+               }
+       }
 
-       return 0;
+       return -EINVAL;
 }
 
-static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
 {
-       int result;
+       int result, i;
        u8 d;
 
-       if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
-               return -EINVAL;
-       if (fs == st->chip_config.accl_fs)
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+               if (accel_scale[i] == val) {
+                       d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+                       result = inv_mpu6050_write_reg(st,
+                                       st->reg->accl_config, d);
+                       if (result)
+                               return result;
 
-       d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
-       result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
-       if (result)
-               return result;
-       st->chip_config.accl_fs = fs;
+                       st->chip_config.accl_fs = i;
+                       return 0;
+               }
+       }
 
-       return 0;
+       return -EINVAL;
 }
 
 static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_SCALE:
                switch (chan->type) {
                case IIO_ANGL_VEL:
-                       result = inv_mpu6050_write_fsr(st, val);
+                       result = inv_mpu6050_write_gyro_scale(st, val2);
                        break;
                case IIO_ACCEL:
-                       result = inv_mpu6050_write_accel_fs(st, val);
+                       result = inv_mpu6050_write_accel_scale(st, val2);
                        break;
                default:
                        result = -EINVAL;
index 0cd306a72a6e347391ea97d7a02d05e54b6c64ab..ba27e277511fc52585f8fa25fd20d49bca61be96 100644 (file)
 #include <linux/poll.h>
 #include "inv_mpu_iio.h"
 
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+       unsigned long flags;
+
+       /* take the spin lock so the interrupt handler cannot push while we reset */
+       spin_lock_irqsave(&st->time_stamp_lock, flags);
+       kfifo_reset(&st->timestamps);
+       spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
 int inv_reset_fifo(struct iio_dev *indio_dev)
 {
        int result;
@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
                                        INV_MPU6050_BIT_FIFO_RST);
        if (result)
                goto reset_fifo_fail;
+
+       /* clear timestamps fifo */
+       inv_clear_kfifo(st);
+
        /* enable interrupt */
        if (st->chip_config.accl_fifo_enable ||
            st->chip_config.gyro_fifo_enable) {
@@ -83,16 +97,6 @@ reset_fifo_fail:
        return result;
 }
 
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
-{
-       unsigned long flags;
-
-       /* take the spin lock sem to avoid interrupt kick in */
-       spin_lock_irqsave(&st->time_stamp_lock, flags);
-       kfifo_reset(&st->timestamps);
-       spin_unlock_irqrestore(&st->time_stamp_lock, flags);
-}
-
 /**
  * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
  */
@@ -184,7 +188,6 @@ end_session:
 flush_fifo:
        /* Flush HW and SW FIFOs. */
        inv_reset_fifo(indio_dev);
-       inv_clear_kfifo(st);
        mutex_unlock(&indio_dev->mlock);
        iio_trigger_notify_done(indio_dev->trig);
 
index 5cc3692acf377664dbf255a698bb5e64606fe864..b3a36376c719317006cf8b9d30e1369f8b6af8fb 100644 (file)
@@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
                base = KMX61_MAG_XOUT_L;
 
        mutex_lock(&data->lock);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = kmx61_read_measurement(data, base, bit);
                if (ret < 0) {
index aaba9d3d980ee623ad6b78111a9c9708198b23e0..4df97f650e448e80053e037fed6d5e208493c687 100644 (file)
@@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
  * @attr_list: List of IIO device attributes
  *
  * This function frees the memory allocated for each of the IIO device
- * attributes in the list. Note: if you want to reuse the list after calling
- * this function you have to reinitialize it using INIT_LIST_HEAD().
+ * attributes in the list.
  */
 void iio_free_chan_devattr_list(struct list_head *attr_list)
 {
@@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
 
        list_for_each_entry_safe(p, n, attr_list, l) {
                kfree(p->dev_attr.attr.name);
+               list_del(&p->l);
                kfree(p);
        }
 }
@@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 
        iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
        kfree(indio_dev->chan_attr_group.attrs);
+       indio_dev->chan_attr_group.attrs = NULL;
 }
 
 static void iio_dev_release(struct device *device)
index a4b397048f71f9fe2e22be46f45fca0fb06cb8ff..a99692ba91bc75fb3186f69f2d55bfc9efd6652c 100644 (file)
@@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
 error_free_setup_event_lines:
        iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
        kfree(indio_dev->event_interface);
+       indio_dev->event_interface = NULL;
        return ret;
 }
 
index 74dff4e4a11acdda1ec44ec6eaec75ef4d0543e4..89fca3a7075039b9b9a0de5514c1b96758d4593b 100644 (file)
@@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
                                            &val);
index aec7a6aa2951db47bc6b5be969a29d1867688b23..8c014b5dab4c82ff805a744c89655555a56094fc 100644 (file)
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        if (dmasync)
                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
+       /*
+        * If the combination of the addr and size requested for this memory
+        * region causes an integer overflow, return error.
+        */
+       if ((PAGE_ALIGN(addr + size) <= size) ||
+           (PAGE_ALIGN(addr + size) <= addr))
+               return ERR_PTR(-EINVAL);
+
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
 
index b972c0b41799b51e2f554c1abc703d78e0c4636e..976bea794b5f7726cd642cb08de4073abd5dcb58 100644 (file)
@@ -587,8 +587,9 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }
 
-       err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+       err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
@@ -1525,8 +1526,8 @@ static void update_gids_task(struct work_struct *work)
        memcpy(gids, gw->gids, sizeof gw->gids);
 
        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-                      MLX4_CMD_WRAPPED);
+                      MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
        if (err)
                pr_warn("set port command failed\n");
        else
@@ -1564,7 +1565,7 @@ static void reset_gids_task(struct work_struct *work)
                                    IB_LINK_LAYER_ETHERNET) {
                err = mlx4_cmd(dev, mailbox->dma,
                               MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                              1, MLX4_CMD_SET_PORT,
+                              MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
                               MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_WRAPPED);
                if (err)
index 39ab0caefdf9759a230b3c0c43377394fb7036c7..66080580e24db3e90c887bcea7132e6687bc3d33 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index c463e7bba5f453f0303a74b88ec96275c796ee0f..2ee6b105197544abb2799e552b129d37eff53906 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -572,11 +572,15 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
+       struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+       void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+
        mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                   to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
-                   MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
+                   uar_page,
+                   MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
+                   to_mcq(ibcq)->mcq.cons_index);
 
        return 0;
 }
@@ -697,8 +701,6 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
        cq->mcq.set_ci_db  = cq->db.db;
        cq->mcq.arm_db     = cq->db.db + 1;
-       *cq->mcq.set_ci_db = 0;
-       *cq->mcq.arm_db    = 0;
        cq->mcq.cqe_sz = cqe_size;
 
        err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
@@ -782,7 +784,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
        cq->cqe_size = cqe_size;
        cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
        cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
-       err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
        if (err)
                goto err_cqb;
 
index ece028fc47d681bf7b77b1242bb46cdb1467d527..a0e4e6ddb71ac55fe79222bf181d1fe229ebd6d9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 657af9a1167cc9ee8c38889ba8cef071c8892c5b..9cf9a37bb5ff9360303a0ea9197869b5fcfbaefc 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index cc4ac1e583b29725af01e03e40bebaee758c6e01..57c9809e8b8774e8aac47806134216ef97c46883 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -62,95 +62,6 @@ static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
-int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
-{
-       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
-       struct mlx5_eq *eq, *n;
-       int err = -ENOENT;
-
-       spin_lock(&table->lock);
-       list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
-               if (eq->index == vector) {
-                       *eqn = eq->eqn;
-                       *irqn = eq->irqn;
-                       err = 0;
-                       break;
-               }
-       }
-       spin_unlock(&table->lock);
-
-       return err;
-}
-
-static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
-{
-       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
-       char name[MLX5_MAX_EQ_NAME];
-       struct mlx5_eq *eq, *n;
-       int ncomp_vec;
-       int nent;
-       int err;
-       int i;
-
-       INIT_LIST_HEAD(&dev->eqs_list);
-       ncomp_vec = table->num_comp_vectors;
-       nent = MLX5_COMP_EQ_SIZE;
-       for (i = 0; i < ncomp_vec; i++) {
-               eq = kzalloc(sizeof(*eq), GFP_KERNEL);
-               if (!eq) {
-                       err = -ENOMEM;
-                       goto clean;
-               }
-
-               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
-               err = mlx5_create_map_eq(dev->mdev, eq,
-                                        i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        name, &dev->mdev->priv.uuari.uars[0]);
-               if (err) {
-                       kfree(eq);
-                       goto clean;
-               }
-               mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
-               eq->index = i;
-               spin_lock(&table->lock);
-               list_add_tail(&eq->list, &dev->eqs_list);
-               spin_unlock(&table->lock);
-       }
-
-       dev->num_comp_vectors = ncomp_vec;
-       return 0;
-
-clean:
-       spin_lock(&table->lock);
-       list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
-               list_del(&eq->list);
-               spin_unlock(&table->lock);
-               if (mlx5_destroy_unmap_eq(dev->mdev, eq))
-                       mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
-               kfree(eq);
-               spin_lock(&table->lock);
-       }
-       spin_unlock(&table->lock);
-       return err;
-}
-
-static void free_comp_eqs(struct mlx5_ib_dev *dev)
-{
-       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
-       struct mlx5_eq *eq, *n;
-
-       spin_lock(&table->lock);
-       list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
-               list_del(&eq->list);
-               spin_unlock(&table->lock);
-               if (mlx5_destroy_unmap_eq(dev->mdev, eq))
-                       mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
-               kfree(eq);
-               spin_lock(&table->lock);
-       }
-       spin_unlock(&table->lock);
-}
-
 static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
@@ -1291,10 +1202,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        get_ext_port_caps(dev);
 
-       err = alloc_comp_eqs(dev);
-       if (err)
-               goto err_dealloc;
-
        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
@@ -1303,7 +1210,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.local_dma_lkey      = mdev->caps.gen.reserved_lkey;
        dev->num_ports          = mdev->caps.gen.num_ports;
        dev->ib_dev.phys_port_cnt     = dev->num_ports;
-       dev->ib_dev.num_comp_vectors    = dev->num_comp_vectors;
+       dev->ib_dev.num_comp_vectors    =
+               dev->mdev->priv.eq_table.num_comp_vectors;
        dev->ib_dev.dma_device  = &mdev->pdev->dev;
 
        dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
@@ -1390,13 +1298,13 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        err = init_node_data(dev);
        if (err)
-               goto err_eqs;
+               goto err_dealloc;
 
        mutex_init(&dev->cap_mask_mutex);
 
        err = create_dev_resources(&dev->devr);
        if (err)
-               goto err_eqs;
+               goto err_dealloc;
 
        err = mlx5_ib_odp_init_one(dev);
        if (err)
@@ -1433,9 +1341,6 @@ err_odp:
 err_rsrc:
        destroy_dev_resources(&dev->devr);
 
-err_eqs:
-       free_comp_eqs(dev);
-
 err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);
 
@@ -1450,7 +1355,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
        destroy_dev_resources(&dev->devr);
-       free_comp_eqs(dev);
        ib_dealloc_device(&dev->ib_dev);
 }
 
@@ -1458,6 +1362,7 @@ static struct mlx5_interface mlx5_ib_interface = {
        .add            = mlx5_ib_add,
        .remove         = mlx5_ib_remove,
        .event          = mlx5_ib_event,
+       .protocol       = MLX5_INTERFACE_PROTOCOL_IB,
 };
 
 static int __init mlx5_ib_init(void)
index 611a9fdf2f383cf0982415d4603906e208f24da8..40df2cca0609a6c3dfff09e738bec5450685f8e0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 83f22fe297c8ac522efa80001819990cdc31508b..dff1cfcdf476cfed06d8835cd5316d234df09e1e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -421,9 +421,7 @@ struct mlx5_ib_dev {
        struct ib_device                ib_dev;
        struct mlx5_core_dev            *mdev;
        MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
-       struct list_head                eqs_list;
        int                             num_ports;
-       int                             num_comp_vectors;
        /* serialize update of capability mask
         */
        struct mutex                    cap_mask_mutex;
@@ -594,7 +592,6 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                          struct ib_ucontext *context,
                                          struct ib_udata *udata);
 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
-int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
index cd9822eeacae3f1ab138731ea9c6a67963974bc2..71c5935838649e71a4a2f6b6cc16cb18f9a1cc16 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index a2c541c4809a583dc330db186593a03e01825f53..5099db08afd2c80c1b9049e3fc71fef4cfbb269b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index be0cd358b080977ed50fdc2dedcd2e0257a19a56..4d7024b899cb091a12aacfa9450af1e7750d4f45 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -796,9 +796,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
                goto err_free;
        }
 
-       qp->db.db[0] = 0;
-       qp->db.db[1] = 0;
-
        qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
@@ -1162,10 +1159,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;
+
        if (qp->state != IB_QPS_RESET) {
                mlx5_ib_qp_disable_pagefaults(qp);
                if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
-                                       MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
+                                       MLX5_QP_STATE_RST, in, 0, &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);
        }
index 41fec66217dd3b2b7f61fcff7068f01a06493933..02d77a29764d5e1ab925423b64bdb20157fbc781 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -165,8 +165,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                return err;
        }
 
-       *srq->db.db = 0;
-
        if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
index d0ba264ac1ed259ab4b58473d3ba12c2d499f4b0..76fb7b927d373ef2710d412ec015309fb24fdd72 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 58b5aa3b6f2dded5d2e6d15aff080551aa9eddd9..657b89b1d291c90c76c57f8d4b8183b87c61aaad 100644 (file)
@@ -842,6 +842,13 @@ static void ipoib_set_mcast_list(struct net_device *dev)
        queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
+static int ipoib_get_iflink(const struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       return priv->parent->ifindex;
+}
+
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
 {
        /*
@@ -1341,6 +1348,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
        .ndo_start_xmit          = ipoib_start_xmit,
        .ndo_tx_timeout          = ipoib_timeout,
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
+       .ndo_get_iflink          = ipoib_get_iflink,
 };
 
 void ipoib_setup(struct net_device *dev)
index 9fad7b5ac8b91910af7e6b1527f20165323e7dc8..4dd1313056a42169ecbe7211bc0fd750d5d9f88e 100644 (file)
@@ -102,7 +102,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
        }
 
        priv->child_type  = type;
-       priv->dev->iflink = ppriv->dev->ifindex;
        list_add_tail(&priv->list, &ppriv->child_intfs);
 
        return 0;
index 1bd15ebc01f2df5002eca38f7089a61701471f5d..27bcdbc950c9fc2df9067504cfe055bf379eee78 100644 (file)
@@ -1154,10 +1154,28 @@ out:
        mutex_unlock(&alps_mutex);
 }
 
-static void alps_report_bare_ps2_packet(struct input_dev *dev,
+static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
                                        unsigned char packet[],
                                        bool report_buttons)
 {
+       struct alps_data *priv = psmouse->private;
+       struct input_dev *dev;
+
+       /* Figure out which device to use to report the bare packet */
+       if (priv->proto_version == ALPS_PROTO_V2 &&
+           (priv->flags & ALPS_DUALPOINT)) {
+               /* On V2 devices the DualPoint Stick reports bare packets */
+               dev = priv->dev2;
+       } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
+               /* Register the dev3 mouse the first time a bare PS/2 packet is seen */
+               if (!IS_ERR(priv->dev3))
+                       psmouse_queue_work(psmouse, &priv->dev3_register_work,
+                                          0);
+               return;
+       } else {
+               dev = priv->dev3;
+       }
+
        if (report_buttons)
                alps_report_buttons(dev, NULL,
                                packet[0] & 1, packet[0] & 2, packet[0] & 4);
@@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
                 * de-synchronization.
                 */
 
-               alps_report_bare_ps2_packet(priv->dev2,
-                                           &psmouse->packet[3], false);
+               alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
+                                           false);
 
                /*
                 * Continue with the standard ALPS protocol handling,
@@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
         * properly we only do this if the device is fully synchronized.
         */
        if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
-
-               /* Register dev3 mouse if we received PS/2 packet first time */
-               if (unlikely(!priv->dev3))
-                       psmouse_queue_work(psmouse,
-                                          &priv->dev3_register_work, 0);
-
                if (psmouse->pktcnt == 3) {
-                       /* Once dev3 mouse device is registered report data */
-                       if (likely(!IS_ERR_OR_NULL(priv->dev3)))
-                               alps_report_bare_ps2_packet(priv->dev3,
-                                                           psmouse->packet,
-                                                           true);
+                       alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+                                                   true);
                        return PSMOUSE_FULL_PACKET;
                }
                return PSMOUSE_GOOD_DATA;
@@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->x_max = 1360;
-               priv->y_max = 660;
                priv->x_bits = 23;
                priv->y_bits = 12;
+
+               if (alps_dolphin_get_device_area(psmouse, priv))
+                       return -EIO;
+
                break;
 
        case ALPS_PROTO_V6:
@@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-
-               if (alps_dolphin_get_device_area(psmouse, priv))
-                       return -EIO;
+               priv->x_max = 0xfff;
+               priv->y_max = 0x7ff;
 
                if (priv->fw_ver[1] != 0xba)
                        priv->flags |= ALPS_BUTTONPAD;
index dda605836546847afbdbdd4c77c976134a914a2e..3b06c8a360b661f02ed3aa826e5a996b93e5b358 100644 (file)
@@ -152,6 +152,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                {ANY_BOARD_ID, ANY_BOARD_ID},
                1024, 5022, 2508, 4832
        },
+       {
+               (const char * const []){"LEN2006", NULL},
+               {2691, 2691},
+               1024, 5045, 2457, 4832
+       },
        {
                (const char * const []){"LEN2006", NULL},
                {ANY_BOARD_ID, ANY_BOARD_ID},
@@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
-       "LEN2006",
+       "LEN2006", /* Edge E440/E540 */
        "LEN2007",
        "LEN2008",
        "LEN2009",
index fc13dd56953e1eb2cb25107ef017336546b733a7..a3adde6519f0a24b7150f8e69ceabed534ceaa38 100644 (file)
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
                return 0;
 
        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
+                       smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                ret = arm_smmu_iova_to_phys_hard(domain, iova);
-       else
+       } else {
                ret = ops->iova_to_phys(ops, iova);
+       }
+
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
        return ret;
@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENODEV;
        }
 
-       if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+       if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
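
As a side note, the corrected feature test reads as a simple predicate: hardware translation ops need stage-1 support, and on an SMMUv2 they additionally need the non-secure ATOS registers. A standalone restatement of that logic (bit positions here are placeholders for the sketch, not the driver's real ID0_* values):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bit positions, not the driver's ID0_* definitions. */
#define SK_ID0_S1TS   (1u << 30)        /* stage-1 translation supported */
#define SK_ID0_ATOSNS (1u << 26)        /* non-secure ATOS registers present */

static bool has_hw_translation_ops(uint32_t id0, int smmu_version)
{
        /* Stage-1 support is mandatory; on SMMUv2 the non-secure ATOS
         * registers must also be implemented. */
        return (id0 & SK_ID0_S1TS) &&
               (smmu_version == 1 || (id0 & SK_ID0_ATOSNS));
}
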
index ae4c1a854e57896fc64e33668369bebc23f70945..2d1e05bdbb53f5901035294a71c65231b004a338 100644 (file)
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
        struct page *freelist = NULL;
+       int i;
 
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
 
        /* clear attached or cached domains */
        rcu_read_lock();
-       for_each_active_iommu(iommu, drhd)
-               iommu_detach_domain(domain, iommu);
+       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
+               iommu_detach_domain(domain, g_iommus[i]);
        rcu_read_unlock();
 
        dma_free_pagelist(freelist);
index 10186cac7716e246ea8b8a6e915bf07970670f5b..bc39bdf7b99bf170d793965e28777d3c7a279537 100644 (file)
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
 
 static const struct of_device_id ipmmu_of_ids[] = {
        { .compatible = "renesas,ipmmu-vmsa", },
+       { }
 };
 
 static struct platform_driver ipmmu_driver = {
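
The one-line fix above adds the terminating entry that the OF matching code needs in order to find the end of the table. A generic sketch of the idiom, with an invented compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Matching walks the table until it hits a zeroed element, so omitting
 * the empty "sentinel" entry runs off the end of the array. */
static const struct of_device_id example_of_ids[] = {
        { .compatible = "vendor,example-ipmmu" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_ids);
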
index 596b0a9eee99a9f2ea1beaac726bddc4069c4937..9687f8afebffbb865256ba6677663e6c76702aa1 100644 (file)
@@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
 
 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
 {
-       cmd->raw_cmd[0] &= ~(0xffffUL << 32);
+       cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
        cmd->raw_cmd[0] |= ((u64)devid) << 32;
 }
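
For context, the device ID occupies bits 32-63 of this command word, so the old mask ~(0xffffUL << 32) only cleared bits 32-47, while BIT_ULL(32) - 1 keeps bits 0-31 and clears everything above. A small runnable restatement in plain C (names invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Keep the low 32 bits of a 64-bit command word, then insert a new
 * 32-bit device ID into bits 32..63. */
static uint64_t set_devid(uint64_t raw, uint32_t devid)
{
        raw &= (1ULL << 32) - 1;        /* clear bits 32..63 */
        raw |= (uint64_t)devid << 32;   /* insert the new ID */
        return raw;
}

int main(void)
{
        uint64_t cmd = 0xdeadbeefcafef00dULL;
        printf("%#llx\n", (unsigned long long)set_devid(cmd, 0x1234));
        return 0;
}
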
 
@@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its)
        int i;
        int psz = SZ_64K;
        u64 shr = GITS_BASER_InnerShareable;
+       u64 cache = GITS_BASER_WaWb;
 
        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -848,7 +849,7 @@ retry_baser:
                val = (virt_to_phys(base)                                |
                       (type << GITS_BASER_TYPE_SHIFT)                   |
                       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
-                      GITS_BASER_WaWb                                   |
+                      cache                                             |
                       shr                                               |
                       GITS_BASER_VALID);
 
@@ -874,9 +875,12 @@ retry_baser:
                         * Shareability didn't stick. Just use
                         * whatever the read reported, which is likely
                         * to be the only thing this redistributor
-                        * supports.
+                        * supports. If that's zero, make it
+                        * non-cacheable as well.
                         */
                        shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+                       if (!shr)
+                               cache = GITS_BASER_nC;
                        goto retry_baser;
                }
 
@@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void)
        tmp = readq_relaxed(rbase + GICR_PROPBASER);
 
        if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+               if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
+                       /*
+                        * The HW reports non-shareable, we must
+                        * remove the cacheability attributes as
+                        * well.
+                        */
+                       val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
+                                GICR_PROPBASER_CACHEABILITY_MASK);
+                       val |= GICR_PROPBASER_nC;
+                       writeq_relaxed(val, rbase + GICR_PROPBASER);
+               }
                pr_info_once("GIC: using cache flushing for LPI property table\n");
                gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
        }
 
        /* set PENDBASE */
        val = (page_to_phys(pend_page) |
-              GICR_PROPBASER_InnerShareable |
-              GICR_PROPBASER_WaWb);
+              GICR_PENDBASER_InnerShareable |
+              GICR_PENDBASER_WaWb);
 
        writeq_relaxed(val, rbase + GICR_PENDBASER);
+       tmp = readq_relaxed(rbase + GICR_PENDBASER);
+
+       if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+               /*
+                * The HW reports non-shareable, we must remove the
+                * cacheability attributes as well.
+                */
+               val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+                        GICR_PENDBASER_CACHEABILITY_MASK);
+               val |= GICR_PENDBASER_nC;
+               writeq_relaxed(val, rbase + GICR_PENDBASER);
+       }
 
        /* Enable LPIs */
        val = readl_relaxed(rbase + GICR_CTLR);
@@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void)
                         * This ITS wants a linear CPU number.
                         */
                        target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
-                       target = GICR_TYPER_CPU_NUMBER(target);
+                       target = GICR_TYPER_CPU_NUMBER(target) << 16;
                }
 
                /* Perform collection mapping */
@@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
 
        writeq_relaxed(baser, its->base + GITS_CBASER);
        tmp = readq_relaxed(its->base + GITS_CBASER);
-       writeq_relaxed(0, its->base + GITS_CWRITER);
-       writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
 
-       if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
+       if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
+               if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
+                       /*
+                        * The HW reports non-shareable, we must
+                        * remove the cacheability attributes as
+                        * well.
+                        */
+                       baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
+                                  GITS_CBASER_CACHEABILITY_MASK);
+                       baser |= GITS_CBASER_nC;
+                       writeq_relaxed(baser, its->base + GITS_CBASER);
+               }
                pr_info("ITS: using cache flushing for cmd queue\n");
                its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
        }
 
+       writeq_relaxed(0, its->base + GITS_CWRITER);
+       writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
+
        if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
                its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
                if (!its->domain) {
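
The pattern repeated through this file is: request shareable, write-back attributes; read the register back; and if the hardware zeroed the shareability field, downgrade to non-cacheable and fall back to explicit cache flushing. A self-contained sketch of that negotiate-and-degrade step, using made-up field masks rather than the real GICR_/GITS_ layouts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout for the sketch. */
#define SHAREABILITY_MASK  (0x3ULL << 10)
#define CACHEABILITY_MASK  (0x7ULL << 7)
#define ATTR_NON_CACHEABLE (0x1ULL << 7)

/* Fake "register": pretend the hardware forces shareability to zero. */
static uint64_t fake_reg;
static void write_base(uint64_t v) { fake_reg = v & ~SHAREABILITY_MASK; }
static uint64_t read_base(void)    { return fake_reg; }

static bool program_table_base(uint64_t desired)
{
        bool needs_flush = false;
        uint64_t got;

        write_base(desired);
        got = read_base();

        if ((got ^ desired) & SHAREABILITY_MASK) {
                if (!(got & SHAREABILITY_MASK)) {
                        /* HW says non-shareable: drop cacheability too and
                         * rely on explicit cache maintenance instead. */
                        desired &= ~(SHAREABILITY_MASK | CACHEABILITY_MASK);
                        desired |= ATTR_NON_CACHEABLE;
                        write_base(desired);
                }
                needs_flush = true;
        }
        return needs_flush;
}

int main(void)
{
        uint64_t want = (0x2ULL << 10) | (0x5ULL << 7); /* shareable + WB */
        printf("needs flushing: %d\n", program_table_base(want));
        return 0;
}
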
index ee035ec4526bd802a89de51c6edc099d8bbb9747..169172d2ba05c8b4187b9151975dcd1be52043c2 100644 (file)
@@ -1,6 +1,6 @@
 config LGUEST
        tristate "Linux hypervisor example code"
-       depends on X86_32 && EVENTFD && TTY
+       depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
        select HVC_DRIVER
        ---help---
          This is a very simple module which allows you to run
index 9b641b38b857106000c6645035e7490c943f9233..8001fe9e3434734ad92c8109ef8fa860906238ce 100644 (file)
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 
        dm_get(md);
        atomic_inc(&md->open_count);
-
 out:
        spin_unlock(&_minor_lock);
 
@@ -442,16 +441,20 @@ out:
 
 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
-       struct mapped_device *md = disk->private_data;
+       struct mapped_device *md;
 
        spin_lock(&_minor_lock);
 
+       md = disk->private_data;
+       if (WARN_ON(!md))
+               goto out;
+
        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
                queue_work(deferred_remove_workqueue, &deferred_remove_work);
 
        dm_put(md);
-
+out:
        spin_unlock(&_minor_lock);
 }
 
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
        int minor = MINOR(disk_devt(md->disk));
 
        unlock_fs(md);
-       bdput(md->bdev);
        destroy_workqueue(md->wq);
 
        if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
                mempool_destroy(md->rq_pool);
        if (md->bs)
                bioset_free(md->bs);
-       blk_integrity_unregister(md->disk);
-       del_gendisk(md->disk);
+
        cleanup_srcu_struct(&md->io_barrier);
        free_table_devices(&md->table_devices);
-       free_minor(minor);
+       dm_stats_cleanup(&md->stats);
 
        spin_lock(&_minor_lock);
        md->disk->private_data = NULL;
        spin_unlock(&_minor_lock);
-
+       if (blk_get_integrity(md->disk))
+               blk_integrity_unregister(md->disk);
+       del_gendisk(md->disk);
        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
-       dm_stats_cleanup(&md->stats);
+       bdput(md->bdev);
+       free_minor(minor);
+
        module_put(THIS_MODULE);
        kfree(md);
 }
@@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
        might_sleep();
 
-       spin_lock(&_minor_lock);
        map = dm_get_live_table(md, &srcu_idx);
+
+       spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);
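
The close path now re-reads disk->private_data under _minor_lock and bails out if teardown has already cleared it, while free_dev() clears the pointer under the same lock before the disk goes away. A purely illustrative userspace sketch of that guarded-pointer pattern (not the device-mapper code itself):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Teardown clears the shared pointer and the user checks it, both under
 * the same lock, so the user either sees a valid object or a NULL it can
 * safely skip. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared;

static void teardown(void)
{
        int *victim;

        pthread_mutex_lock(&lock);
        victim = shared;
        shared = NULL;          /* publish "gone" before freeing */
        pthread_mutex_unlock(&lock);
        free(victim);
}

static void use_if_present(void)
{
        pthread_mutex_lock(&lock);
        if (shared)
                printf("value: %d\n", *shared);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        shared = malloc(sizeof(*shared));
        *shared = 42;
        use_if_present();
        teardown();
        use_if_present();       /* safely skips: pointer already cleared */
        return 0;
}
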
index f38ec424872e362b934aaa6edd2fb21358bcb326..5615522f8d628b0f9d4b43d240d18d7bad02c835 100644 (file)
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
                for (id = kempld_dmi_table;
                     id->matches[0].slot != DMI_NONE; id++)
                        if (strstr(id->ident, force_device_id))
-                               if (id->callback && id->callback(id))
+                               if (id->callback && !id->callback(id))
                                        break;
                if (id->matches[0].slot == DMI_NONE)
                        return -ENODEV;
index ede50244f265b14d950fba0448652960304fa971..dbd907d7170ebe990cb63fede374624ea03b6fd4 100644 (file)
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
 int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
 {
        u16 value;
+       u8 *buf;
+       int ret;
 
        if (!data)
                return -EINVAL;
-       *data = 0;
+
+       buf = kzalloc(sizeof(u8), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
        addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
        value = swab16(addr);
 
-       return usb_control_msg(ucr->pusb_dev,
+       ret = usb_control_msg(ucr->pusb_dev,
                        usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
                        USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                       value, 0, data, 1, 100);
+                       value, 0, buf, 1, 100);
+       *data = *buf;
+
+       kfree(buf);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
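
The rewrite above swaps the caller's buffer for a small kmalloc'd one: buffers handed to usb_control_msg() are used for DMA, so they should be heap-allocated rather than live on a caller's stack or inside an unrelated structure. A hedged sketch of the same shape, with an invented vendor request and simplified error handling:

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: read one byte from a vendor register via EP0 through a small
 * DMA-safe bounce buffer, then copy the result to the caller. */
static int example_read_byte(struct usb_device *udev, u16 value, u8 *out)
{
        u8 *buf;
        int ret;

        buf = kmalloc(1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              0x01 /* hypothetical vendor request */,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              value, 0, buf, 1, 100);
        if (ret >= 0)
                *out = *buf;

        kfree(buf);
        return ret < 0 ? ret : 0;
}
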
 
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
 int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
 {
        int ret;
+       u16 *buf;
 
        if (!status)
                return -EINVAL;
 
-       if (polling_pipe == 0)
+       if (polling_pipe == 0) {
+               buf = kzalloc(sizeof(u16), GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+
                ret = usb_control_msg(ucr->pusb_dev,
                                usb_rcvctrlpipe(ucr->pusb_dev, 0),
                                RTSX_USB_REQ_POLL,
                                USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                               0, 0, status, 2, 100);
-       else
+                               0, 0, buf, 2, 100);
+               *status = *buf;
+
+               kfree(buf);
+       } else {
                ret = rtsx_usb_get_status_with_bulk(ucr, status);
+       }
 
        /* usb_control_msg may return positive when success */
        if (ret < 0)
index 374696de796cd707c99cf150ec12349335d5f958..fbd54f0e32e8d7baf7e1f2de3c5155201aeccfea 100644 (file)
@@ -1428,8 +1428,10 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
                        else
                                port->aggregator->is_individual = true;
 
-                       port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key;
-                       port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key;
+                       port->aggregator->actor_admin_aggregator_key =
+                               port->actor_admin_port_key;
+                       port->aggregator->actor_oper_aggregator_key =
+                               port->actor_oper_port_key;
                        port->aggregator->partner_system =
                                port->partner_oper.system;
                        port->aggregator->partner_system_priority =
@@ -1755,14 +1757,9 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
        };
 
        if (port) {
-               port->actor_port_number = 1;
                port->actor_port_priority = 0xff;
-               port->actor_system = null_mac_addr;
-               port->actor_system_priority = 0xffff;
                port->actor_port_aggregator_identifier = 0;
                port->ntt = false;
-               port->actor_admin_port_key = 1;
-               port->actor_oper_port_key  = 1;
                port->actor_admin_port_state = AD_STATE_AGGREGATION |
                                               AD_STATE_LACP_ACTIVITY;
                port->actor_oper_port_state  = AD_STATE_AGGREGATION |
@@ -1784,8 +1781,6 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
                port->sm_mux_state = 0;
                port->sm_mux_timer_counter = 0;
                port->sm_tx_state = 0;
-               port->sm_tx_timer_counter = 0;
-               port->slave = NULL;
                port->aggregator = NULL;
                port->next_port_in_aggregator = NULL;
                port->transaction_id = 0;
@@ -1968,8 +1963,6 @@ void bond_3ad_bind_slave(struct slave *slave)
                 * lacpdu's are sent in one second)
                 */
                port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
-               port->aggregator = NULL;
-               port->next_port_in_aggregator = NULL;
 
                __disable_port(port);
 
@@ -2332,8 +2325,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
        spin_lock_bh(&slave->bond->mode_lock);
 
        port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
-       port->actor_oper_port_key = port->actor_admin_port_key |=
-               (__get_link_speed(port) << 1);
+       port->actor_admin_port_key |= __get_link_speed(port) << 1;
+       port->actor_oper_port_key = port->actor_admin_port_key;
        netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number);
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
@@ -2365,8 +2358,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
        spin_lock_bh(&slave->bond->mode_lock);
 
        port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
-       port->actor_oper_port_key = port->actor_admin_port_key |=
-               __get_duplex(port);
+       port->actor_admin_port_key |= __get_duplex(port);
+       port->actor_oper_port_key = port->actor_admin_port_key;
        netdev_dbg(slave->bond->dev, "Port %d slave %s changed duplex\n",
                   port->actor_port_number, slave->dev->name);
        if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
@@ -2407,24 +2400,19 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
         * on link up we are forcing recheck on the duplex and speed since
         * some of the adaptors (ce1000.lan) report.
         */
+       port->actor_admin_port_key &= ~(AD_DUPLEX_KEY_MASKS|AD_SPEED_KEY_MASKS);
        if (link == BOND_LINK_UP) {
                port->is_enabled = true;
-               port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
-               port->actor_oper_port_key = port->actor_admin_port_key |=
-                       __get_duplex(port);
-               port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
-               port->actor_oper_port_key = port->actor_admin_port_key |=
-                       (__get_link_speed(port) << 1);
-               if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
+               port->actor_admin_port_key |=
+                       (__get_link_speed(port) << 1) | __get_duplex(port);
+               if (port->actor_admin_port_key & AD_DUPLEX_KEY_MASKS)
                        port->sm_vars |= AD_PORT_LACP_ENABLED;
        } else {
                /* link has failed */
                port->is_enabled = false;
-               port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
-               port->actor_oper_port_key = (port->actor_admin_port_key &=
-                                            ~AD_SPEED_KEY_MASKS);
                port->sm_vars &= ~AD_PORT_LACP_ENABLED;
        }
+       port->actor_oper_port_key = port->actor_admin_port_key;
        netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
                   port->actor_port_number,
                   link == BOND_LINK_UP ? "UP" : "DOWN");
index 7b4684ccdb3fae520e568638e6a3fb240a388d12..78dde56ae6e6fa9dd7d04e64ae1969023d285238 100644 (file)
@@ -3881,7 +3881,8 @@ static inline int bond_slave_override(struct bonding *bond,
        /* Find out if any slaves have the same mapping as this skb. */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (slave->queue_id == skb->queue_mapping) {
-                       if (bond_slave_can_tx(slave)) {
+                       if (bond_slave_is_up(slave) &&
+                           slave->link == BOND_LINK_UP) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return 0;
                        }
index 27bbc56de15fa37497676f8b389cdc17eced09a0..9da06537237ff220a16b3c5831c728d809d91bb1 100644 (file)
@@ -70,7 +70,6 @@ struct ser_device {
        struct tty_struct *tty;
        bool tx_started;
        unsigned long state;
-       char *tty_name;
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_tty_dir;
        struct debugfs_blob_wrapper tx_blob;
index 80c46ad4cee439d2015b3ee7de66d578f54afd0b..ad0a7e8c2c2bdf33626824645a8180d8e6d900ff 100644 (file)
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
                rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
                           CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
                new_state = max(tx_state, rx_state);
-       } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+       } else {
                __flexcan_get_berr_counter(dev, &bec);
-               new_state = CAN_STATE_ERROR_PASSIVE;
+               new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+                           CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
                rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
                tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
-       } else {
-               new_state = CAN_STATE_BUS_OFF;
        }
 
        /* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
        const struct flexcan_devtype_data *devtype_data;
        struct net_device *dev;
        struct flexcan_priv *priv;
+       struct regulator *reg_xceiver;
        struct resource *mem;
        struct clk *clk_ipg = NULL, *clk_per = NULL;
        void __iomem *base;
        int err, irq;
        u32 clock_freq = 0;
 
+       reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+       if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       else if (IS_ERR(reg_xceiver))
+               reg_xceiver = NULL;
+
        if (pdev->dev.of_node)
                of_property_read_u32(pdev->dev.of_node,
                                                "clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->pdata = dev_get_platdata(&pdev->dev);
        priv->devtype_data = devtype_data;
 
-       priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
-       if (IS_ERR(priv->reg_xceiver))
-               priv->reg_xceiver = NULL;
+       priv->reg_xceiver = reg_xceiver;
 
        netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
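
Looking up the transceiver regulator first lets probe return -EPROBE_DEFER before any other resource is claimed, while any other lookup error is treated as "no transceiver supply". A sketch of that ordering under the same assumptions (the supply name and surrounding driver details are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct platform_device *pdev)
{
        struct regulator *xceiver;

        /* Ask for the optional supply first: if the regulator is not
         * available yet, defer the whole probe instead of half-initialising. */
        xceiver = devm_regulator_get(&pdev->dev, "xceiver");
        if (PTR_ERR(xceiver) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (IS_ERR(xceiver))
                xceiver = NULL;         /* treat as "no transceiver supply" */

        /* ... allocate the netdev, map registers, etc., then stash xceiver */
        return 0;
}
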
 
index 9376f5e5b94ed2956c85808c5642ccbdb34036bd..866bac0ae7e966855d1085f5a8735988832a1bb7 100644 (file)
@@ -123,7 +123,7 @@ MODULE_LICENSE("GPL v2");
  * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
  */
 struct cpc_can_msg {
-       u32 id;
+       __le32 id;
        u8 length;
        u8 msg[8];
 };
@@ -200,8 +200,8 @@ struct __packed ems_cpc_msg {
        u8 type;        /* type of message */
        u8 length;      /* length of data within union 'msg' */
        u8 msgid;       /* confirmation handle */
-       u32 ts_sec;     /* timestamp in seconds */
-       u32 ts_nsec;    /* timestamp in nano seconds */
+       __le32 ts_sec;  /* timestamp in seconds */
+       __le32 ts_nsec; /* timestamp in nano seconds */
 
        union {
                u8 generic[64];
@@ -765,7 +765,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 
        msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
 
-       msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
+       msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
        msg->msg.can_msg.length = cf->can_dlc;
 
        if (cf->can_id & CAN_RTR_FLAG) {
@@ -783,9 +783,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
                msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
        }
 
-       /* Respect byte order */
-       msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);
-
        for (i = 0; i < MAX_TX_URBS; i++) {
                if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
                        context = &dev->tx_contexts[i];
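
Typing the wire-format fields as __le32 and converting with cpu_to_le32() at the point of assignment keeps the endianness visible to sparse and avoids storing a CPU-order value and byte-swapping it in place later. A minimal sketch with an invented message layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_wire_msg {
        __le32 id;              /* little-endian on the wire */
        u8     length;
        u8     data[8];
};

static void fill_msg(struct example_wire_msg *msg, u32 can_id, u8 dlc)
{
        /* Convert once, where the value is stored; never reuse msg->id as
         * a CPU-order value afterwards. */
        msg->id = cpu_to_le32(can_id);
        msg->length = dlc;
}
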
index 009acc8641fc557cb580cb688983daf041519e4b..8b4d3e6875eb17e6bca38c812132953d0c5ce2c2 100644 (file)
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
        }
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
        init_usb_anchor(&dev->rx_submitted);
 
        atomic_set(&dev->active_channels, 0);
index d269ae0b072a4660ba90bcd5fc132b891c3b0522..4643914859b2c7894f7556cfe023e1903eeb9cc6 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_TX_URBS                    16
 #define MAX_RX_URBS                    4
 #define START_TIMEOUT                  1000 /* msecs */
 #define STOP_TIMEOUT                   1000 /* msecs */
@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
        };
 };
 
+/* Context for an outstanding, not yet ACKed, transmission */
 struct kvaser_usb_tx_urb_context {
        struct kvaser_usb_net_priv *priv;
        u32 echo_index;
@@ -456,8 +456,13 @@ struct kvaser_usb {
        struct usb_endpoint_descriptor *bulk_in, *bulk_out;
        struct usb_anchor rx_submitted;
 
+       /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+        * not yet ACKed, transmissions on this device. This value is
+        * also used as a sentinel for marking free tx contexts.
+        */
        u32 fw_version;
        unsigned int nchannels;
+       unsigned int max_tx_urbs;
        enum kvaser_usb_family family;
 
        bool rxinitdone;
@@ -467,19 +472,18 @@ struct kvaser_usb {
 
 struct kvaser_usb_net_priv {
        struct can_priv can;
-
-       spinlock_t tx_contexts_lock;
-       int active_tx_contexts;
-       struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
-       struct usb_anchor tx_submitted;
-       struct completion start_comp, stop_comp;
+       struct can_berr_counter bec;
 
        struct kvaser_usb *dev;
        struct net_device *netdev;
        int channel;
 
-       struct can_berr_counter bec;
+       struct completion start_comp, stop_comp;
+       struct usb_anchor tx_submitted;
+
+       spinlock_t tx_contexts_lock;
+       int active_tx_contexts;
+       struct kvaser_usb_tx_urb_context tx_contexts[];
 };
 
 static const struct usb_device_id kvaser_usb_table[] = {
@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                         * for further details.
                         */
                        if (tmp->len == 0) {
-                               pos = round_up(pos,
-                                              dev->bulk_in->wMaxPacketSize);
+                               pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+                                                               wMaxPacketSize));
                                continue;
                        }
 
@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
        switch (dev->family) {
        case KVASER_LEAF:
                dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+               dev->max_tx_urbs =
+                       le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
                break;
        case KVASER_USBCAN:
                dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+               dev->max_tx_urbs =
+                       le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
                break;
        }
 
@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
        stats = &priv->netdev->stats;
 
-       context = &priv->tx_contexts[tid % MAX_TX_URBS];
+       context = &priv->tx_contexts[tid % dev->max_tx_urbs];
 
        /* Sometimes the state change doesn't come after a bus-off event */
        if (priv->can.restart_ms &&
@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
        spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
        can_get_echo_skb(priv->netdev, context->echo_index);
-       context->echo_index = MAX_TX_URBS;
+       context->echo_index = dev->max_tx_urbs;
        --priv->active_tx_contexts;
        netif_wake_queue(priv->netdev);
 
@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                 * number of events in case of a heavy rx load on the bus.
                 */
                if (msg->len == 0) {
-                       pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+                       pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+                                                       wMaxPacketSize));
                        continue;
                }
 
@@ -1512,11 +1521,13 @@ error:
 
 static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
 {
-       int i;
+       int i, max_tx_urbs;
+
+       max_tx_urbs = priv->dev->max_tx_urbs;
 
        priv->active_tx_contexts = 0;
-       for (i = 0; i < MAX_TX_URBS; i++)
-               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+       for (i = 0; i < max_tx_urbs; i++)
+               priv->tx_contexts[i].echo_index = max_tx_urbs;
 }
 
 /* This method might sleep. Do not call it in the atomic context
@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
        spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-       for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
-               if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+       for (i = 0; i < dev->max_tx_urbs; i++) {
+               if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
                        context = &priv->tx_contexts[i];
 
                        context->echo_index = i;
                        can_put_echo_skb(skb, netdev, context->echo_index);
                        ++priv->active_tx_contexts;
-                       if (priv->active_tx_contexts >= MAX_TX_URBS)
+                       if (priv->active_tx_contexts >= dev->max_tx_urbs)
                                netif_stop_queue(netdev);
 
                        break;
@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
                can_free_echo_skb(netdev, context->echo_index);
-               context->echo_index = MAX_TX_URBS;
+               context->echo_index = dev->max_tx_urbs;
                --priv->active_tx_contexts;
                netif_wake_queue(netdev);
 
@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
        if (err)
                return err;
 
-       netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+       netdev = alloc_candev(sizeof(*priv) +
+                             dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+                             dev->max_tx_urbs);
        if (!netdev) {
                dev_err(&intf->dev, "Cannot alloc candev\n");
                return -ENOMEM;
@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
                return err;
        }
 
+       dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+               ((dev->fw_version >> 24) & 0xff),
+               ((dev->fw_version >> 16) & 0xff),
+               (dev->fw_version & 0xffff));
+
+       dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
+
        err = kvaser_usb_get_card_info(dev);
        if (err) {
                dev_err(&intf->dev,
@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
                return err;
        }
 
-       dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
-               ((dev->fw_version >> 24) & 0xff),
-               ((dev->fw_version >> 16) & 0xff),
-               (dev->fw_version & 0xffff));
-
        for (i = 0; i < dev->nchannels; i++) {
                err = kvaser_usb_init_one(intf, id, i);
                if (err) {
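
With the limit now reported by firmware, the fixed tx_contexts[MAX_TX_URBS] array becomes a flexible array member and the allocation is sized as sizeof(*priv) plus max_tx_urbs elements. A runnable userspace sketch of that sizing pattern, including the "sentinel equals the array length" convention the driver uses for free slots:

#include <stdio.h>
#include <stdlib.h>

struct tx_context {
        unsigned int echo_index;
};

struct example_priv {
        unsigned int max_tx;
        struct tx_context contexts[];   /* flexible array, sized at alloc time */
};

static struct example_priv *alloc_priv(unsigned int max_tx)
{
        struct example_priv *priv;
        unsigned int i;

        priv = malloc(sizeof(*priv) + max_tx * sizeof(*priv->contexts));
        if (!priv)
                return NULL;

        priv->max_tx = max_tx;
        /* Use max_tx itself as the "free slot" sentinel, as the driver does. */
        for (i = 0; i < max_tx; i++)
                priv->contexts[i].echo_index = max_tx;
        return priv;
}

int main(void)
{
        struct example_priv *p = alloc_priv(16);

        if (!p)
                return 1;
        printf("slot 0 sentinel: %u\n", p->contexts[0].echo_index);
        free(p);
        return 0;
}
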
index 1ba7c25002e1e27ec1a9333412c175edb0f5594a..e8fc4952c6b074f80a2af132d2bc161e7fd3d8c7 100644 (file)
@@ -26,8 +26,8 @@
 #define PUCAN_CMD_FILTER_STD           0x008
 #define PUCAN_CMD_TX_ABORT             0x009
 #define PUCAN_CMD_WR_ERR_CNT           0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE      0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE     0x00c
+#define PUCAN_CMD_SET_EN_OPTION                0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION       0x00c
 #define PUCAN_CMD_END_OF_COLLECTION    0x3ff
 
 /* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
        u16     unused;
 };
 
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR             0x0001
-#define PUCAN_FLTEXT_BUSLOAD           0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR             0x0001
+#define PUCAN_OPTION_BUSLOAD           0x0002
+#define PUCAN_OPTION_CANDFDISO         0x0004
 
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
        __le16  opcode_channel;
 
-       __le16  ext_mask;
+       __le16  options;
        u32     unused;
 };
 
index 0bac0f14edc3cd73727b4a0d9f0776d857c4ba8c..09d14e70abd746b7c17ff0cac6ee6832e9d8771f 100644 (file)
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
        u8      unused[5];
 };
 
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
 #define PCAN_UFD_FLTEXT_CALIBRATION    0x8000
 
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
        __le16  opcode_channel;
 
-       __le16  ext_mask;
+       __le16  ucan_mask;
        u16     unused;
        __le16  usb_mask;
 };
@@ -182,7 +182,7 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
 static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
 {
        void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
-       int err;
+       int err = 0;
        u8 *packet_ptr;
        int i, n = 1, packet_len;
        ptrdiff_t cmd_len;
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
        /* moves the pointer forward */
        pc += sizeof(struct pucan_wr_err_cnt);
 
+       /* add command to switch from ISO to non-ISO mode, if fw allows it */
+       if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+               struct pucan_options *puo = (struct pucan_options *)pc;
+
+               puo->opcode_channel =
+                       (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+                       pucan_cmd_opcode_channel(dev,
+                                                PUCAN_CMD_CLR_DIS_OPTION) :
+                       pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+               puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+               /* to be sure that no other extended bits will be taken into
+                * account
+                */
+               puo->unused = 0;
+
+               /* moves the pointer forward */
+               pc += sizeof(struct pucan_options);
+       }
+
        /* next, go back to operational mode */
        cmd = (struct pucan_command *)pc;
        cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
        return pcan_usb_fd_send_cmd(dev, cmd);
 }
 
-/* set/unset notifications filter:
+/* set/unset options
  *
- *     onoff   sets(1)/unset(0) notifications
- *     mask    each bit defines a kind of notification to set/unset
+ *     onoff   set(1)/unset(0) options
+ *     mask    each bit defines a kind of options to set/unset
  */
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
-                                     bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+                                  bool onoff, u16 ucan_mask, u16 usb_mask)
 {
-       struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+       struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
 
        cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
-                                       (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
-                                                 PUCAN_CMD_RX_FRAME_DISABLE);
+                                       (onoff) ? PUCAN_CMD_SET_EN_OPTION :
+                                                 PUCAN_CMD_CLR_DIS_OPTION);
 
-       cmd->ext_mask = cpu_to_le16(ext_mask);
+       cmd->ucan_mask = cpu_to_le16(ucan_mask);
        cmd->usb_mask = cpu_to_le16(usb_mask);
 
        /* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
                                       &pcan_usb_pro_fd);
 
                /* enable USB calibration messages */
-               err = pcan_usb_fd_set_filter_ext(dev, 1,
-                                                PUCAN_FLTEXT_ERROR,
-                                                PCAN_UFD_FLTEXT_CALIBRATION);
+               err = pcan_usb_fd_set_options(dev, 1,
+                                             PUCAN_OPTION_ERROR,
+                                             PCAN_UFD_FLTEXT_CALIBRATION);
        }
 
        pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
 
        /* turn off special msgs for that interface if no other dev opened */
        if (pdev->usb_if->dev_opened_count == 1)
-               pcan_usb_fd_set_filter_ext(dev, 0,
-                                          PUCAN_FLTEXT_ERROR,
-                                          PCAN_UFD_FLTEXT_CALIBRATION);
+               pcan_usb_fd_set_options(dev, 0,
+                                       PUCAN_OPTION_ERROR,
+                                       PCAN_UFD_FLTEXT_CALIBRATION);
        pdev->usb_if->dev_opened_count--;
 
        return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
                         pdev->usb_if->fw_info.fw_version[2],
                         dev->adapter->ctrl_count);
 
-               /* the currently supported hw is non-ISO */
-               dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+               /* check for ability to switch between ISO/non-ISO modes */
+               if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+                       /* firmware >= 2.x supports ISO/non-ISO switching */
+                       dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+               } else {
+                       /* firmware < 2.x only supports fixed(!) non-ISO */
+                       dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+               }
 
                /* tell the hardware the can driver is running */
                err = pcan_usb_fd_drv_loaded(dev, 1);
@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
        if (dev->ctrl_idx == 0) {
                /* turn off calibration message if any device were opened */
                if (pdev->usb_if->dev_opened_count > 0)
-                       pcan_usb_fd_set_filter_ext(dev, 0,
-                                                  PUCAN_FLTEXT_ERROR,
-                                                  PCAN_UFD_FLTEXT_CALIBRATION);
+                       pcan_usb_fd_set_options(dev, 0,
+                                               PUCAN_OPTION_ERROR,
+                                               PCAN_UFD_FLTEXT_CALIBRATION);
 
                /* tell USB adapter that the driver is being unloaded */
                pcan_usb_fd_drv_loaded(dev, 0);
index 2d7e1ffe9fdc49664ca4d2686b3339c447d00589..b4af6d5aff7cc970773f55c3f9a8fedbe3c06f1f 100644 (file)
@@ -25,66 +25,33 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
-               if (ret == 0x1212)
+               if (ret == PORT_SWITCH_ID_6123_A1)
                        return "Marvell 88E6123 (A1)";
-               if (ret == 0x1213)
+               if (ret == PORT_SWITCH_ID_6123_A2)
                        return "Marvell 88E6123 (A2)";
-               if ((ret & 0xfff0) == 0x1210)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6123)
                        return "Marvell 88E6123";
 
-               if (ret == 0x1612)
+               if (ret == PORT_SWITCH_ID_6161_A1)
                        return "Marvell 88E6161 (A1)";
-               if (ret == 0x1613)
+               if (ret == PORT_SWITCH_ID_6161_A2)
                        return "Marvell 88E6161 (A2)";
-               if ((ret & 0xfff0) == 0x1610)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6161)
                        return "Marvell 88E6161";
 
-               if (ret == 0x1652)
+               if (ret == PORT_SWITCH_ID_6165_A1)
                        return "Marvell 88E6165 (A1)";
-               if (ret == 0x1653)
+               if (ret == PORT_SWITCH_ID_6165_A2)
                        return "Marvell 88e6165 (A2)";
-               if ((ret & 0xfff0) == 0x1650)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6165)
                        return "Marvell 88E6165";
        }
 
        return NULL;
 }
 
-static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
-{
-       int i;
-       int ret;
-       unsigned long timeout;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 8; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0xc800) == 0xc800)
-                       break;
-
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
 {
        int ret;
@@ -271,6 +238,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
@@ -278,7 +246,19 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       ret = mv88e6123_61_65_switch_reset(ds);
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6123:
+               ps->num_ports = 3;
+               break;
+       case PORT_SWITCH_ID_6161:
+       case PORT_SWITCH_ID_6165:
+               ps->num_ports = 6;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       ret = mv88e6xxx_switch_reset(ds, false);
        if (ret < 0)
                return ret;
 
@@ -288,7 +268,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 6; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                ret = mv88e6123_61_65_setup_port(ds, i);
                if (ret < 0)
                        return ret;
@@ -297,108 +277,18 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6123_61_65_port_to_phy_addr(int port)
-{
-       if (port >= 0 && port <= 4)
-               return port;
-       return -1;
-}
-
-static int
-mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6123_61_65_port_to_phy_addr(port);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_read(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static int
-mv88e6123_61_65_phy_write(struct dsa_switch *ds,
-                             int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6123_61_65_port_to_phy_addr(port);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-       { "sw_in_discards", 4, 0x110, },
-       { "sw_in_filtered", 2, 0x112, },
-       { "sw_out_filtered", 2, 0x113, },
-};
-
-static void
-mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
-                             mv88e6123_61_65_hw_stats, port, data);
-}
-
-static void
-mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
-                                 int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
-                                   mv88e6123_61_65_hw_stats, port, data);
-}
-
-static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
-}
-
 struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6123_61_65_probe,
        .setup                  = mv88e6123_61_65_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6123_61_65_phy_read,
-       .phy_write              = mv88e6123_61_65_phy_write,
+       .phy_read               = mv88e6xxx_phy_read,
+       .phy_write              = mv88e6xxx_phy_write,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6123_61_65_get_strings,
-       .get_ethtool_stats      = mv88e6123_61_65_get_ethtool_stats,
-       .get_sset_count         = mv88e6123_61_65_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
 #ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6xxx_get_temp,
 #endif
index 2540ef0142afd68f5dfb489850bbd9ad64f3fdc2..e54824fa0d959f919586c7ec07cbb678a40a031a 100644 (file)
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
-/* Switch product IDs */
-#define ID_6085                0x04a0
-#define ID_6095                0x0950
-#define ID_6131                0x1060
-#define ID_6131_B2     0x1066
-
 static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
 {
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -31,56 +25,23 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
                int ret_masked = ret & 0xfff0;
 
-               if (ret_masked == ID_6085)
+               if (ret_masked == PORT_SWITCH_ID_6085)
                        return "Marvell 88E6085";
-               if (ret_masked == ID_6095)
+               if (ret_masked == PORT_SWITCH_ID_6095)
                        return "Marvell 88E6095/88E6095F";
-               if (ret == ID_6131_B2)
+               if (ret == PORT_SWITCH_ID_6131_B2)
                        return "Marvell 88E6131 (B2)";
-               if (ret_masked == ID_6131)
+               if (ret_masked == PORT_SWITCH_ID_6131)
                        return "Marvell 88E6131";
        }
 
        return NULL;
 }
 
-static int mv88e6131_switch_reset(struct dsa_switch *ds)
-{
-       int i;
-       int ret;
-       unsigned long timeout;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 11; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0xc800) == 0xc800)
-                       break;
-
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int mv88e6131_setup_global(struct dsa_switch *ds)
 {
        int ret;
@@ -174,7 +135,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * (100 Mb/s on 6085) full duplex.
         */
        if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               if (ps->id == ID_6085)
+               if (ps->id == PORT_SWITCH_ID_6085)
                        REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
                else
                        REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
@@ -201,35 +162,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
                /* On 6085, unknown multicast forward is controlled
                 * here rather than in Port Control 2 register.
                 */
-               if (ps->id == ID_6085)
+               if (ps->id == PORT_SWITCH_ID_6085)
                        val |= 0x0008;
        }
        if (ds->dsa_port_mask & (1 << p))
                val |= 0x0100;
        REG_WRITE(addr, 0x04, val);
 
-       /* Port Control 1: disable trunking.  Also, if this is the
-        * CPU port, enable learn messages to be sent to this port.
-        */
-       REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
-       /* Port based VLAN map: give each port its own address
-        * database, allow the CPU port to talk to each of the 'real'
-        * ports, and allow each of the 'real' ports to only talk to
-        * the upstream port.
-        */
-       val = (p & 0xf) << 12;
-       if (dsa_is_cpu_port(ds, p))
-               val |= ds->phys_port_mask;
-       else
-               val |= 1 << dsa_upstream_port(ds);
-       REG_WRITE(addr, 0x06, val);
-
-       /* Default VLAN ID and priority: don't set a default VLAN
-        * ID, and set the default packet priority to zero.
-        */
-       REG_WRITE(addr, 0x07, 0x0000);
-
        /* Port Control 2: don't force a good FCS, don't use
         * VLAN-based, source address-based or destination
         * address-based priority overrides, don't let the switch
@@ -242,7 +181,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * If this is the upstream port for this switch, enable
         * forwarding of unknown multicast addresses.
         */
-       if (ps->id == ID_6085)
+       if (ps->id == PORT_SWITCH_ID_6085)
                /* on 6085, bits 3:0 are reserved, bit 6 controls ARP
                 * mirroring, and multicast forward is handled in
                 * Port Control register.
@@ -278,7 +217,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         */
        REG_WRITE(addr, 0x19, 0x7654);
 
-       return 0;
+       return mv88e6xxx_setup_port_common(ds, p);
 }
 
 static int mv88e6131_setup(struct dsa_switch *ds)
@@ -287,13 +226,28 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        int i;
        int ret;
 
-       mutex_init(&ps->smi_mutex);
+       ret = mv88e6xxx_setup_common(ds);
+       if (ret < 0)
+               return ret;
+
        mv88e6xxx_ppu_state_init(ds);
-       mutex_init(&ps->stats_mutex);
 
-       ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6085:
+               ps->num_ports = 10;
+               break;
+       case PORT_SWITCH_ID_6095:
+               ps->num_ports = 11;
+               break;
+       case PORT_SWITCH_ID_6131:
+       case PORT_SWITCH_ID_6131_B2:
+               ps->num_ports = 8;
+               break;
+       default:
+               return -ENODEV;
+       }
 
-       ret = mv88e6131_switch_reset(ds);
+       ret = mv88e6xxx_switch_reset(ds, false);
        if (ret < 0)
                return ret;
 
@@ -303,7 +257,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 11; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                ret = mv88e6131_setup_port(ds, i);
                if (ret < 0)
                        return ret;
@@ -312,17 +266,24 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6131_port_to_phy_addr(int port)
+static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
 {
-       if (port >= 0 && port <= 11)
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (port >= 0 && port < ps->num_ports)
                return port;
-       return -1;
+
+       return -EINVAL;
 }
 
 static int
 mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
 {
-       int addr = mv88e6131_port_to_phy_addr(port);
+       int addr = mv88e6131_port_to_phy_addr(ds, port);
+
+       if (addr < 0)
+               return addr;
+
        return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
 }
 
@@ -330,61 +291,12 @@ static int
 mv88e6131_phy_write(struct dsa_switch *ds,
                              int port, int regnum, u16 val)
 {
-       int addr = mv88e6131_port_to_phy_addr(port);
-       return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-};
-
-static void
-mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
-                             mv88e6131_hw_stats, port, data);
-}
+       int addr = mv88e6131_port_to_phy_addr(ds, port);
 
-static void
-mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
-                                 int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
-                                   mv88e6131_hw_stats, port, data);
-}
+       if (addr < 0)
+               return addr;
 
-static int mv88e6131_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6131_hw_stats);
+       return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
 }
 
 struct dsa_switch_driver mv88e6131_switch_driver = {
@@ -396,9 +308,9 @@ struct dsa_switch_driver mv88e6131_switch_driver = {
        .phy_read               = mv88e6131_phy_read,
        .phy_write              = mv88e6131_phy_write,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6131_get_strings,
-       .get_ethtool_stats      = mv88e6131_get_ethtool_stats,
-       .get_sset_count         = mv88e6131_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
 };
 
 MODULE_ALIAS("platform:mv88e6085");
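
Across these DSA patches the per-chip reset and statistics code moves into shared mv88e6xxx helpers, so each sub-driver mainly maps its product ID to a port count before calling mv88e6xxx_switch_reset(). A loose sketch of that mapping (the IDs and counts mirror the hunks above; everything else is invented):

#include <stdio.h>

/* Illustrative only: map a masked product ID to a port count, the way the
 * per-chip setup() callbacks now do before calling the shared reset helper. */
static int ports_for_id(unsigned int product_id, unsigned int *num_ports)
{
        switch (product_id & 0xfff0) {
        case 0x1060:                    /* 88E6131 family */
                *num_ports = 8;
                return 0;
        case 0x0950:                    /* 88E6095 */
                *num_ports = 11;
                return 0;
        default:
                return -1;              /* unknown switch */
        }
}

int main(void)
{
        unsigned int n;

        if (ports_for_id(0x1066, &n) == 0)
                printf("ports: %u\n", n);
        return 0;
}
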
index 18cfead83dc94851d4bedc0cfca6f8f0eaf43797..9104efea0e3e8289803c53348e79eded3d80e50c 100644 (file)
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
-/* Switch product IDs */
-#define ID_6171        0x1710
-#define ID_6172        0x1720
-
 static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
 {
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -29,64 +25,20 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
-               if ((ret & 0xfff0) == ID_6171)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
                        return "Marvell 88E6171";
-               if ((ret & 0xfff0) == ID_6172)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
                        return "Marvell 88E6172";
        }
 
        return NULL;
 }
 
-static int mv88e6171_switch_reset(struct dsa_switch *ds)
-{
-       int i;
-       int ret;
-       unsigned long timeout;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 8; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. Keep PPU active.  The PPU needs to be
-        * active to support indirect phy register accesses through
-        * global registers 0x18 and 0x19.
-        */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0xc800) == 0xc800)
-                       break;
-
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       /* Enable ports not under DSA, e.g. WAN port */
-       for (i = 0; i < 8; i++) {
-               if (dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))
-                       continue;
-
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret | 0x03);
-       }
-
-       return 0;
-}
-
 static int mv88e6171_setup_global(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
        int i;
 
@@ -151,7 +103,7 @@ static int mv88e6171_setup_global(struct dsa_switch *ds)
        }
 
        /* Clear all trunk masks. */
-       for (i = 0; i < 8; i++)
+       for (i = 0; i < ps->num_ports; i++)
                REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
 
        /* Clear all trunk mappings. */
@@ -274,6 +226,7 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6171_setup(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
@@ -281,7 +234,9 @@ static int mv88e6171_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       ret = mv88e6171_switch_reset(ds);
+       ps->num_ports = 7;
+
+       ret = mv88e6xxx_switch_reset(ds, true);
        if (ret < 0)
                return ret;
 
@@ -291,7 +246,7 @@ static int mv88e6171_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
                        continue;
 
@@ -303,99 +258,12 @@ static int mv88e6171_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6171_port_to_phy_addr(int port)
-{
-       if (port >= 0 && port <= 4)
-               return port;
-       return -1;
-}
-
-static int
-mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6171_port_to_phy_addr(port);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static int
-mv88e6171_phy_write(struct dsa_switch *ds,
-                   int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6171_port_to_phy_addr(port);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-};
-
-static void
-mv88e6171_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6171_hw_stats),
-                             mv88e6171_hw_stats, port, data);
-}
-
-static void
-mv88e6171_get_ethtool_stats(struct dsa_switch *ds,
-                           int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6171_hw_stats),
-                                   mv88e6171_hw_stats, port, data);
-}
-
-static int mv88e6171_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6171_hw_stats);
-}
-
 static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
                             struct ethtool_eee *e)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
-       if (ps->id == ID_6172)
+       if (ps->id == PORT_SWITCH_ID_6172)
                return mv88e6xxx_get_eee(ds, port, e);
 
        return -EOPNOTSUPP;
@@ -406,7 +274,7 @@ static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
-       if (ps->id == ID_6172)
+       if (ps->id == PORT_SWITCH_ID_6172)
                return mv88e6xxx_set_eee(ds, port, phydev, e);
 
        return -EOPNOTSUPP;
@@ -418,12 +286,12 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
        .probe                  = mv88e6171_probe,
        .setup                  = mv88e6171_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6171_phy_read,
-       .phy_write              = mv88e6171_phy_write,
+       .phy_read               = mv88e6xxx_phy_read_indirect,
+       .phy_write              = mv88e6xxx_phy_write_indirect,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6171_get_strings,
-       .get_ethtool_stats      = mv88e6171_get_ethtool_stats,
-       .get_sset_count         = mv88e6171_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
        .set_eee                = mv88e6171_set_eee,
        .get_eee                = mv88e6171_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
index 41fe3a6a72d1fa213239f47c68cec5bf34bb92cb..126c11b81e756ec232106de72583904638ccb024 100644 (file)
@@ -30,58 +30,24 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
-               if ((ret & 0xfff0) == 0x1760)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
                        return "Marvell 88E6176";
-               if (ret == 0x3521)
+               if (ret == PORT_SWITCH_ID_6352_A0)
                        return "Marvell 88E6352 (A0)";
-               if (ret == 0x3522)
+               if (ret == PORT_SWITCH_ID_6352_A1)
                        return "Marvell 88E6352 (A1)";
-               if ((ret & 0xfff0) == 0x3520)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6352)
                        return "Marvell 88E6352";
        }
 
        return NULL;
 }
 
-static int mv88e6352_switch_reset(struct dsa_switch *ds)
-{
-       unsigned long timeout;
-       int ret;
-       int i;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 7; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. Keep PPU active (bit 14, undocumented).
-        * The PPU needs to be active to support indirect phy register
-        * accesses through global registers 0x18 and 0x19.
-        */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0x8800) == 0x8800)
-                       break;
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int mv88e6352_setup_global(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
        int i;
 
@@ -152,7 +118,7 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
        /* Disable ingress rate limiting by resetting all ingress
         * rate limit registers to their initial state.
         */
-       for (i = 0; i < 7; i++)
+       for (i = 0; i < ps->num_ports; i++)
                REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
 
        /* Initialise cross-chip port VLAN table to reset defaults. */
@@ -264,48 +230,13 @@ static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
 
 #ifdef CONFIG_NET_DSA_HWMON
 
-static int mv88e6352_phy_page_read(struct dsa_switch *ds,
-                                  int port, int page, int reg)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-       if (ret < 0)
-               goto error;
-       ret = mv88e6xxx_phy_read_indirect(ds, port, reg);
-error:
-       mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static int mv88e6352_phy_page_write(struct dsa_switch *ds,
-                                   int port, int page, int reg, int val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-       if (ret < 0)
-               goto error;
-
-       ret = mv88e6xxx_phy_write_indirect(ds, port, reg, val);
-error:
-       mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
 static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
 {
        int ret;
 
        *temp = 0;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 27);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
        if (ret < 0)
                return ret;
 
@@ -320,7 +251,7 @@ static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
 
        *temp = 0;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
        if (ret < 0)
                return ret;
 
@@ -333,11 +264,11 @@ static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
 {
        int ret;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
        if (ret < 0)
                return ret;
        temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-       return mv88e6352_phy_page_write(ds, 0, 6, 26,
+       return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
                                        (ret & 0xe0ff) | (temp << 8));
 }
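
The two conversions above are inverses of each other: page 6, register 26 packs a 5-bit limit in bits 12:8 with a 5 degree step and a -25 degree offset, which is why the read path computes ((val & 0x1f) - 5) * 5 while the write path rounds to the nearest step, adds 5 and clamps to 0..0x1f. A small illustrative sketch of the arithmetic (userspace only, non-negative temperatures assumed):

#include <stdio.h>

static int field_to_degc(int field)
{
        return ((field & 0x1f) - 5) * 5;        /* 0 -> -25 C, 0x1f -> +130 C */
}

static int degc_to_field(int degc)
{
        int field = (degc + 2) / 5 + 5;         /* DIV_ROUND_CLOSEST for degc >= 0 */

        if (field < 0)
                field = 0;
        if (field > 0x1f)
                field = 0x1f;
        return field;
}

int main(void)
{
        printf("0x0e -> %d C, 45 C -> 0x%02x\n",
               field_to_degc(0x0e), degc_to_field(45));        /* prints 45 and 0x0e */
        return 0;
}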
 
@@ -347,7 +278,7 @@ static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
 
        *alarm = false;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
        if (ret < 0)
                return ret;
 
@@ -367,9 +298,11 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
+       ps->num_ports = 7;
+
        mutex_init(&ps->eeprom_mutex);
 
-       ret = mv88e6352_switch_reset(ds);
+       ret = mv88e6xxx_switch_reset(ds, true);
        if (ret < 0)
                return ret;
 
@@ -379,7 +312,7 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 7; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                ret = mv88e6352_setup_port(ds, i);
                if (ret < 0)
                        return ret;
@@ -388,83 +321,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6352_port_to_phy_addr(int port)
-{
-       if (port >= 0 && port <= 4)
-               return port;
-       return -EINVAL;
-}
-
-static int
-mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6352_port_to_phy_addr(port);
-       int ret;
-
-       if (addr < 0)
-               return addr;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
-
-       return ret;
-}
-
-static int
-mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6352_port_to_phy_addr(port);
-       int ret;
-
-       if (addr < 0)
-               return addr;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
-
-       return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-       { "sw_in_discards", 4, 0x110, },
-       { "sw_in_filtered", 2, 0x112, },
-       { "sw_out_filtered", 2, 0x113, },
-};
-
 static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -663,37 +519,18 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds,
        return 0;
 }
 
-static void
-mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
-                             mv88e6352_hw_stats, port, data);
-}
-
-static void
-mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
-                                   mv88e6352_hw_stats, port, data);
-}
-
-static int mv88e6352_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6352_hw_stats);
-}
-
 struct dsa_switch_driver mv88e6352_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6352_probe,
        .setup                  = mv88e6352_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6352_phy_read,
-       .phy_write              = mv88e6352_phy_write,
+       .phy_read               = mv88e6xxx_phy_read_indirect,
+       .phy_write              = mv88e6xxx_phy_write_indirect,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6352_get_strings,
-       .get_ethtool_stats      = mv88e6352_get_ethtool_stats,
-       .get_sset_count         = mv88e6352_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
        .set_eee                = mv88e6xxx_set_eee,
        .get_eee                = mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
index 13572cc24c6dc42a308e4bc0b660f3c0f673e48d..fc8d3b6ffe8e0b35e64ed2aaa78b342a1b86f66a 100644 (file)
@@ -33,11 +33,11 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
        int i;
 
        for (i = 0; i < 16; i++) {
-               ret = mdiobus_read(bus, sw_addr, 0);
+               ret = mdiobus_read(bus, sw_addr, SMI_CMD);
                if (ret < 0)
                        return ret;
 
-               if ((ret & 0x8000) == 0)
+               if ((ret & SMI_CMD_BUSY) == 0)
                        return 0;
        }
 
@@ -57,7 +57,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Transmit the read command. */
-       ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
+       ret = mdiobus_write(bus, sw_addr, SMI_CMD,
+                           SMI_CMD_OP_22_READ | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -67,7 +68,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Read the data. */
-       ret = mdiobus_read(bus, sw_addr, 1);
+       ret = mdiobus_read(bus, sw_addr, SMI_DATA);
        if (ret < 0)
                return ret;
 
@@ -119,12 +120,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
                return ret;
 
        /* Transmit the data to write. */
-       ret = mdiobus_write(bus, sw_addr, 1, val);
+       ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
        if (ret < 0)
                return ret;
 
        /* Transmit the write command. */
-       ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
+       ret = mdiobus_write(bus, sw_addr, SMI_CMD,
+                           SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -166,26 +168,26 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 int mv88e6xxx_config_prio(struct dsa_switch *ds)
 {
        /* Configure the IP ToS mapping registers. */
-       REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
-       REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
-       REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
-       REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
-       REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
-       REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
 
        /* Configure the IEEE 802.1p priority mapping register. */
-       REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
 
        return 0;
 }
 
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 {
-       REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
-       REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
-       REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
        return 0;
 }
@@ -199,12 +201,13 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
                int j;
 
                /* Write the MAC address byte. */
-               REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+                         GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
 
                /* Wait for the write to complete. */
                for (j = 0; j < 16; j++) {
-                       ret = REG_READ(REG_GLOBAL2, 0x0d);
-                       if ((ret & 0x8000) == 0)
+                       ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+                       if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
                                break;
                }
                if (j == 16)
@@ -214,14 +217,17 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
        return 0;
 }
 
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
 {
        if (addr >= 0)
                return mv88e6xxx_reg_read(ds, addr, regnum);
        return 0xffff;
 }
 
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
+                               u16 val)
 {
        if (addr >= 0)
                return mv88e6xxx_reg_write(ds, addr, regnum, val);
@@ -234,14 +240,16 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
        int ret;
        unsigned long timeout;
 
-       ret = REG_READ(REG_GLOBAL, 0x04);
-       REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
+       ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+                 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
 
        timeout = jiffies + 1 * HZ;
        while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
+               ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
                usleep_range(1000, 2000);
-               if ((ret & 0xc000) != 0xc000)
+               if ((ret & GLOBAL_STATUS_PPU_MASK) !=
+                   GLOBAL_STATUS_PPU_POLLING)
                        return 0;
        }
 
@@ -253,14 +261,15 @@ static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
        int ret;
        unsigned long timeout;
 
-       ret = REG_READ(REG_GLOBAL, 0x04);
-       REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
+       ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
 
        timeout = jiffies + 1 * HZ;
        while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
+               ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
                usleep_range(1000, 2000);
-               if ((ret & 0xc000) == 0xc000)
+               if ((ret & GLOBAL_STATUS_PPU_MASK) ==
+                   GLOBAL_STATUS_PPU_POLLING)
                        return 0;
        }
 
@@ -381,11 +390,12 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
 
                link = 0;
                if (dev->flags & IFF_UP) {
-                       port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
+                       port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
+                                                        PORT_STATUS);
                        if (port_status < 0)
                                continue;
 
-                       link = !!(port_status & 0x0800);
+                       link = !!(port_status & PORT_STATUS_LINK);
                }
 
                if (!link) {
@@ -396,22 +406,22 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
                        continue;
                }
 
-               switch (port_status & 0x0300) {
-               case 0x0000:
+               switch (port_status & PORT_STATUS_SPEED_MASK) {
+               case PORT_STATUS_SPEED_10:
                        speed = 10;
                        break;
-               case 0x0100:
+               case PORT_STATUS_SPEED_100:
                        speed = 100;
                        break;
-               case 0x0200:
+               case PORT_STATUS_SPEED_1000:
                        speed = 1000;
                        break;
                default:
                        speed = -1;
                        break;
                }
-               duplex = (port_status & 0x0400) ? 1 : 0;
-               fc = (port_status & 0x8000) ? 1 : 0;
+               duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
+               fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
 
                if (!netif_carrier_ok(dev)) {
                        netdev_info(dev,
@@ -424,14 +434,27 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
        }
 }
 
+static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6352:
+       case PORT_SWITCH_ID_6172:
+       case PORT_SWITCH_ID_6176:
+               return true;
+       }
+       return false;
+}
+
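This family check works because setup_common latches ps->id from PORT_SWITCH_ID with the low revision nibble masked off (& 0xfff0), so A0/A1 steppings of the 88E6352 and the various 6172/6176 revisions all collapse onto the family values compared here. A tiny illustrative check of that masking, using made-up helper names but the ID values from the header below:

#include <assert.h>

#define EX_ID_6172      0x1720
#define EX_ID_6176      0x1760
#define EX_ID_6352      0x3520

static int ex_is_6352_family(int raw_id)
{
        int id = raw_id & 0xfff0;               /* strip the revision nibble */

        return id == EX_ID_6352 || id == EX_ID_6172 || id == EX_ID_6176;
}

int main(void)
{
        assert(ex_is_6352_family(0x3521));      /* 88E6352 A0 */
        assert(ex_is_6352_family(0x3522));      /* 88E6352 A1 */
        assert(!ex_is_6352_family(0x1710));     /* 88E6171 is a different family */
        return 0;
}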
 static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
 {
        int ret;
        int i;
 
        for (i = 0; i < 10; i++) {
-               ret = REG_READ(REG_GLOBAL, 0x1d);
-               if ((ret & 0x8000) == 0)
+               ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+               if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
                        return 0;
        }
 
@@ -442,8 +465,13 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 {
        int ret;
 
+       if (mv88e6xxx_6352_family(ds))
+               port = (port + 1) << 5;
+
        /* Snapshot the hardware statistics counters for this port. */
-       REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
+       REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
+                 GLOBAL_STATS_OP_CAPTURE_PORT |
+                 GLOBAL_STATS_OP_HIST_RX_TX | port);
 
        /* Wait for the snapshotting to complete. */
        ret = mv88e6xxx_stats_wait(ds);
@@ -460,7 +488,9 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
 
        *val = 0;
 
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
+       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+                                 GLOBAL_STATS_OP_READ_CAPTURED |
+                                 GLOBAL_STATS_OP_HIST_RX_TX | stat);
        if (ret < 0)
                return;
 
@@ -468,22 +498,77 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
        if (ret < 0)
                return;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
+       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
        if (ret < 0)
                return;
 
        _val = ret << 16;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
+       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
        if (ret < 0)
                return;
 
        *val = _val | ret;
 }
 
-void mv88e6xxx_get_strings(struct dsa_switch *ds,
-                          int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                          int port, uint8_t *data)
+static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+       { "in_good_octets", 8, 0x00, },
+       { "in_bad_octets", 4, 0x02, },
+       { "in_unicast", 4, 0x04, },
+       { "in_broadcasts", 4, 0x06, },
+       { "in_multicasts", 4, 0x07, },
+       { "in_pause", 4, 0x16, },
+       { "in_undersize", 4, 0x18, },
+       { "in_fragments", 4, 0x19, },
+       { "in_oversize", 4, 0x1a, },
+       { "in_jabber", 4, 0x1b, },
+       { "in_rx_error", 4, 0x1c, },
+       { "in_fcs_error", 4, 0x1d, },
+       { "out_octets", 8, 0x0e, },
+       { "out_unicast", 4, 0x10, },
+       { "out_broadcasts", 4, 0x13, },
+       { "out_multicasts", 4, 0x12, },
+       { "out_pause", 4, 0x15, },
+       { "excessive", 4, 0x11, },
+       { "collisions", 4, 0x1e, },
+       { "deferred", 4, 0x05, },
+       { "single", 4, 0x14, },
+       { "multiple", 4, 0x17, },
+       { "out_fcs_error", 4, 0x03, },
+       { "late", 4, 0x1f, },
+       { "hist_64bytes", 4, 0x08, },
+       { "hist_65_127bytes", 4, 0x09, },
+       { "hist_128_255bytes", 4, 0x0a, },
+       { "hist_256_511bytes", 4, 0x0b, },
+       { "hist_512_1023bytes", 4, 0x0c, },
+       { "hist_1024_max_bytes", 4, 0x0d, },
+       /* Not all devices have the following counters */
+       { "sw_in_discards", 4, 0x110, },
+       { "sw_in_filtered", 2, 0x112, },
+       { "sw_out_filtered", 2, 0x113, },
+
+};
+
+static bool have_sw_in_discards(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
+       case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
+       case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
+       case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
+       case PORT_SWITCH_ID_6352:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
+                                  int nr_stats,
+                                  struct mv88e6xxx_hw_stat *stats,
+                                  int port, uint8_t *data)
 {
        int i;
 
@@ -493,9 +578,10 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds,
        }
 }
 
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
-                                int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                                int port, uint64_t *data)
+static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+                                        int nr_stats,
+                                        struct mv88e6xxx_hw_stat *stats,
+                                        int port, uint64_t *data)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
@@ -543,6 +629,39 @@ error:
        mutex_unlock(&ps->stats_mutex);
 }
 
+/* All the statistics in the table */
+void
+mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+       if (have_sw_in_discards(ds))
+               _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
+                                      mv88e6xxx_hw_stats, port, data);
+       else
+               _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
+                                      mv88e6xxx_hw_stats, port, data);
+}
+
+int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+{
+       if (have_sw_in_discards(ds))
+               return ARRAY_SIZE(mv88e6xxx_hw_stats);
+       return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
+}
+
+void
+mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+                           int port, uint64_t *data)
+{
+       if (have_sw_in_discards(ds))
+               _mv88e6xxx_get_ethtool_stats(
+                       ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
+                       mv88e6xxx_hw_stats, port, data);
+       else
+               _mv88e6xxx_get_ethtool_stats(
+                       ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
+                       mv88e6xxx_hw_stats, port, data);
+}
+
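Keeping the three optional "sw_*" counters at the tail of the single shared table is what lets the callbacks above stay consistent: chips without those counters just use ARRAY_SIZE - 3 everywhere, so string count, string order and counter order always agree. A hedged sketch of that convention with a made-up table, not the driver's:

#include <stdio.h>
#include <stdbool.h>

#define EX_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static const char *ex_stats[] = {
        "in_good_octets",
        "out_octets",
        /* optional counters must stay last so they can be trimmed */
        "sw_in_discards",
        "sw_in_filtered",
        "sw_out_filtered",
};

static int ex_sset_count(bool has_sw_counters)
{
        return has_sw_counters ? (int)EX_ARRAY_SIZE(ex_stats)
                               : (int)EX_ARRAY_SIZE(ex_stats) - 3;
}

int main(void)
{
        printf("%d vs %d\n", ex_sset_count(true), ex_sset_count(false));        /* 5 vs 2 */
        return 0;
}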
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
 {
        return 32 * sizeof(u16);
@@ -579,37 +698,37 @@ int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
 
        mutex_lock(&ps->phy_mutex);
 
-       ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
        if (ret < 0)
                goto error;
 
        /* Enable temperature sensor */
-       ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
        if (ret < 0)
                goto error;
 
-       ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
        if (ret < 0)
                goto error;
 
        /* Wait for temperature to stabilize */
        usleep_range(10000, 12000);
 
-       val = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
        if (val < 0) {
                ret = val;
                goto error;
        }
 
        /* Disable temperature sensor */
-       ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
        if (ret < 0)
                goto error;
 
        *temp = ((val & 0x1f) - 5) * 5;
 
 error:
-       mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
        mutex_unlock(&ps->phy_mutex);
        return ret;
 }
@@ -633,17 +752,20 @@ static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
 
 int mv88e6xxx_phy_wait(struct dsa_switch *ds)
 {
-       return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
+       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+                             GLOBAL2_SMI_OP_BUSY);
 }
 
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
 {
-       return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
+       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                             GLOBAL2_EEPROM_OP_LOAD);
 }
 
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
 {
-       return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
+       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                             GLOBAL2_EEPROM_OP_BUSY);
 }
 
 /* Must be called with SMI lock held */
@@ -668,80 +790,87 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
 {
-       return _mv88e6xxx_wait(ds, REG_GLOBAL, 0x0b, ATU_BUSY);
+       return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+                              GLOBAL_ATU_OP_BUSY);
 }
 
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
+                                       int regnum)
 {
        int ret;
 
-       REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
+                 GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
 
        ret = mv88e6xxx_phy_wait(ds);
        if (ret < 0)
                return ret;
 
-       return REG_READ(REG_GLOBAL2, 0x19);
+       return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
 }
 
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
-                                u16 val)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
+                                        int regnum, u16 val)
 {
-       REG_WRITE(REG_GLOBAL2, 0x19, val);
-       REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
+                 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
 
        return mv88e6xxx_phy_wait(ds);
 }
 
 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int reg;
 
-       reg = mv88e6xxx_phy_read_indirect(ds, port, 16);
+       mutex_lock(&ps->phy_mutex);
+
+       reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
        if (reg < 0)
-               return -EOPNOTSUPP;
+               goto out;
 
        e->eee_enabled = !!(reg & 0x0200);
        e->tx_lpi_enabled = !!(reg & 0x0100);
 
-       reg = REG_READ(REG_PORT(port), 0);
-       e->eee_active = !!(reg & 0x0040);
-
-       return 0;
-}
-
-static int mv88e6xxx_eee_enable_set(struct dsa_switch *ds, int port,
-                                   bool eee_enabled, bool tx_lpi_enabled)
-{
-       int reg, nreg;
-
-       reg = mv88e6xxx_phy_read_indirect(ds, port, 16);
+       reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
        if (reg < 0)
-               return reg;
-
-       nreg = reg & ~0x0300;
-       if (eee_enabled)
-               nreg |= 0x0200;
-       if (tx_lpi_enabled)
-               nreg |= 0x0100;
+               goto out;
 
-       if (nreg != reg)
-               return mv88e6xxx_phy_write_indirect(ds, port, 16, nreg);
+       e->eee_active = !!(reg & PORT_STATUS_EEE);
+       reg = 0;
 
-       return 0;
+out:
+       mutex_unlock(&ps->phy_mutex);
+       return reg;
 }
 
 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
                      struct phy_device *phydev, struct ethtool_eee *e)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int reg;
        int ret;
 
-       ret = mv88e6xxx_eee_enable_set(ds, port, e->eee_enabled,
-                                      e->tx_lpi_enabled);
-       if (ret)
-               return -EOPNOTSUPP;
+       mutex_lock(&ps->phy_mutex);
 
-       return 0;
+       ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+       if (ret < 0)
+               goto out;
+
+       reg = ret & ~0x0300;
+       if (e->eee_enabled)
+               reg |= 0x0200;
+       if (e->tx_lpi_enabled)
+               reg |= 0x0100;
+
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
+out:
+       mutex_unlock(&ps->phy_mutex);
+
+       return ret;
 }
 
 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
@@ -752,7 +881,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0b, cmd);
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
        if (ret < 0)
                return ret;
 
@@ -767,7 +896,7 @@ static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_FLUSH_NONSTATIC_FID);
+       return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
 }
 
 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
@@ -778,23 +907,25 @@ static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
 
        mutex_lock(&ps->smi_mutex);
 
-       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), 0x04);
+       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
        if (reg < 0)
                goto abort;
 
-       oldstate = reg & PSTATE_MASK;
+       oldstate = reg & PORT_CONTROL_STATE_MASK;
        if (oldstate != state) {
                /* Flush forwarding database if we're moving a port
                 * from Learning or Forwarding state to Disabled or
                 * Blocking or Listening state.
                 */
-               if (oldstate >= PSTATE_LEARNING && state <= PSTATE_BLOCKING) {
+               if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
+                   state <= PORT_CONTROL_STATE_BLOCKING) {
                        ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
                        if (ret)
                                goto abort;
                }
-               reg = (reg & ~PSTATE_MASK) | state;
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x04, reg);
+               reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+                                          reg);
        }
 
 abort:
@@ -815,7 +946,7 @@ static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
                reg |= (ps->bridge_mask[fid] |
                       (1 << dsa_upstream_port(ds))) & ~(1 << port);
 
-       return _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x06, reg);
+       return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
 }
 
 /* Must be called with smi lock held */
@@ -927,18 +1058,18 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
 
        switch (state) {
        case BR_STATE_DISABLED:
-               stp_state = PSTATE_DISABLED;
+               stp_state = PORT_CONTROL_STATE_DISABLED;
                break;
        case BR_STATE_BLOCKING:
        case BR_STATE_LISTENING:
-               stp_state = PSTATE_BLOCKING;
+               stp_state = PORT_CONTROL_STATE_BLOCKING;
                break;
        case BR_STATE_LEARNING:
-               stp_state = PSTATE_LEARNING;
+               stp_state = PORT_CONTROL_STATE_LEARNING;
                break;
        case BR_STATE_FORWARDING:
        default:
-               stp_state = PSTATE_FORWARDING;
+               stp_state = PORT_CONTROL_STATE_FORWARDING;
                break;
        }
 
@@ -960,8 +1091,9 @@ static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
        int i, ret;
 
        for (i = 0; i < 3; i++) {
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0d + i,
-                                       (addr[i * 2] << 8) | addr[i * 2 + 1]);
+               ret = _mv88e6xxx_reg_write(
+                       ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+                       (addr[i * 2] << 8) | addr[i * 2 + 1]);
                if (ret < 0)
                        return ret;
        }
@@ -974,7 +1106,8 @@ static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
        int i, ret;
 
        for (i = 0; i < 3; i++) {
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x0d + i);
+               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+                                         GLOBAL_ATU_MAC_01 + i);
                if (ret < 0)
                        return ret;
                addr[i * 2] = ret >> 8;
@@ -999,12 +1132,12 @@ static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0c,
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
                                   (0x10 << port) | state);
        if (ret)
                return ret;
 
-       ret = _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_LOAD_FID);
+       ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
 
        return ret;
 }
@@ -1013,7 +1146,8 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
                           const unsigned char *addr, u16 vid)
 {
        int state = is_multicast_ether_addr(addr) ?
-                                       FDB_STATE_MC_STATIC : FDB_STATE_STATIC;
+               GLOBAL_ATU_DATA_STATE_MC_STATIC :
+               GLOBAL_ATU_DATA_STATE_UC_STATIC;
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
@@ -1031,7 +1165,8 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, FDB_STATE_UNUSED);
+       ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+                                      GLOBAL_ATU_DATA_STATE_UNUSED);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -1053,15 +1188,15 @@ static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
                return ret;
 
        do {
-               ret = _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_GETNEXT_FID);
+               ret = _mv88e6xxx_atu_cmd(ds, fid,  GLOBAL_ATU_OP_GET_NEXT_DB);
                if (ret < 0)
                        return ret;
 
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x0c);
+               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
                if (ret < 0)
                        return ret;
-               state = ret & FDB_STATE_MASK;
-               if (state == FDB_STATE_UNUSED)
+               state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+               if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
                        return -ENOENT;
        } while (!(((ret >> 4) & 0xff) & (1 << port)));
 
@@ -1070,7 +1205,8 @@ static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
                return ret;
 
        *is_static = state == (is_multicast_ether_addr(addr) ?
-                              FDB_STATE_MC_STATIC : FDB_STATE_STATIC);
+                              GLOBAL_ATU_DATA_STATE_MC_STATIC :
+                              GLOBAL_ATU_DATA_STATE_UC_STATIC);
 
        return 0;
 }
@@ -1115,7 +1251,8 @@ int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
        /* Port Control 1: disable trunking, disable sending
         * learning messages to this port.
         */
-       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x05, 0x0000);
+       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+                                  0x0000);
        if (ret)
                goto abort;
 
@@ -1152,7 +1289,7 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
        mutex_init(&ps->stats_mutex);
        mutex_init(&ps->phy_mutex);
 
-       ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+       ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
 
        ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
 
@@ -1161,6 +1298,154 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
        return 0;
 }
 
+int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+       unsigned long timeout;
+       int ret;
+       int i;
+
+       /* Set all ports to the disabled state. */
+       for (i = 0; i < ps->num_ports; i++) {
+               ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+               REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
+       }
+
+       /* Wait for transmit queues to drain. */
+       usleep_range(2000, 4000);
+
+       /* Reset the switch. Keep the PPU active if requested. The PPU
+        * needs to be active to support indirect phy register access
+        * through global registers 0x18 and 0x19.
+        */
+       if (ppu_active)
+               REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+       else
+               REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+       /* Wait up to one second for reset to complete. */
+       timeout = jiffies + 1 * HZ;
+       while (time_before(jiffies, timeout)) {
+               ret = REG_READ(REG_GLOBAL, 0x00);
+               if ((ret & is_reset) == is_reset)
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if (time_after(jiffies, timeout))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       mutex_lock(&ps->phy_mutex);
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       if (ret < 0)
+               goto error;
+       ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+error:
+       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       mutex_unlock(&ps->phy_mutex);
+       return ret;
+}
+
+int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+                            int reg, int val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       mutex_lock(&ps->phy_mutex);
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       if (ret < 0)
+               goto error;
+
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+error:
+       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       mutex_unlock(&ps->phy_mutex);
+       return ret;
+}
+
+static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (port >= 0 && port < ps->num_ports)
+               return port;
+       return -EINVAL;
+}
+
+int
+mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int ret;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       ret = _mv88e6xxx_phy_read(ds, addr, regnum);
+       mutex_unlock(&ps->phy_mutex);
+       return ret;
+}
+
+int
+mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int ret;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
+       mutex_unlock(&ps->phy_mutex);
+       return ret;
+}
+
+int
+mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int ret;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
+       mutex_unlock(&ps->phy_mutex);
+       return ret;
+}
+
+int
+mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
+                            u16 val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int ret;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
+       mutex_unlock(&ps->phy_mutex);
+       return ret;
+}
+
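All of the exported PHY helpers above follow the same convention: the underscore-prefixed variants assume phy_mutex is already held, while the public wrappers validate the port, take the lock, and delegate, so callers such as get_temp can compose several accesses under one critical section. A minimal standalone sketch of that layering, using a pthread mutex as a stand-in and illustrative names:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t ex_phy_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with ex_phy_mutex held (mirrors the _mv88e6xxx_* naming). */
static int _ex_phy_read(int addr, int regnum)
{
        /* a real driver would talk to the hardware here */
        return (addr << 8) | regnum;
}

static int ex_phy_read(int port, int regnum)
{
        int ret;

        if (port < 0 || port >= 5)
                return -EINVAL;                 /* validate before locking */

        pthread_mutex_lock(&ex_phy_mutex);
        ret = _ex_phy_read(port, regnum);
        pthread_mutex_unlock(&ex_phy_mutex);
        return ret;
}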
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
index aaf239aba7260ddfc562e84b60c3883d9031733d..e045154f33646692cb292150efde8e0ad78a9326 100644 (file)
 #ifndef __MV88E6XXX_H
 #define __MV88E6XXX_H
 
-#define REG_PORT(p)            (0x10 + (p))
-#define REG_GLOBAL             0x1b
-#define REG_GLOBAL2            0x1c
-
-/* ATU commands */
-
-#define ATU_BUSY                       0x8000
-
-#define ATU_CMD_LOAD_FID               (ATU_BUSY | 0x3000)
-#define ATU_CMD_GETNEXT_FID            (ATU_BUSY | 0x4000)
-#define ATU_CMD_FLUSH_NONSTATIC_FID    (ATU_BUSY | 0x6000)
-
-/* port states */
+#define SMI_CMD                        0x00
+#define SMI_CMD_BUSY           BIT(15)
+#define SMI_CMD_CLAUSE_22      BIT(12)
+#define SMI_CMD_OP_22_WRITE    ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_22_READ     ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_45_WRITE_ADDR       ((0 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_WRITE_DATA       ((1 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA                ((2 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA_INC    ((3 << 10) | SMI_CMD_BUSY)
+#define SMI_DATA               0x01
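These defines make the indirect SMI command words self-describing: SMI_CMD_OP_22_READ composes to (2 << 10) | BIT(15) | BIT(12) = 0x9800 and SMI_CMD_OP_22_WRITE to 0x9400, i.e. exactly the magic constants the register-access code stops open-coding. A tiny illustrative check of the composition (userspace, with stand-in macro names):

#include <assert.h>

#define EX_BIT(n)               (1u << (n))
#define EX_SMI_CMD_BUSY         EX_BIT(15)
#define EX_SMI_CMD_CLAUSE_22    EX_BIT(12)
#define EX_SMI_CMD_OP_22_READ   ((2u << 10) | EX_SMI_CMD_BUSY | EX_SMI_CMD_CLAUSE_22)
#define EX_SMI_CMD_OP_22_WRITE  ((1u << 10) | EX_SMI_CMD_BUSY | EX_SMI_CMD_CLAUSE_22)

int main(void)
{
        /* read register 2 of switch device address 3 through the SMI master */
        unsigned int cmd = EX_SMI_CMD_OP_22_READ | (3u << 5) | 2u;

        assert(EX_SMI_CMD_OP_22_READ == 0x9800);
        assert(EX_SMI_CMD_OP_22_WRITE == 0x9400);
        assert(cmd == 0x9862);
        return 0;
}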
 
-#define PSTATE_MASK            0x03
-#define PSTATE_DISABLED                0x00
-#define PSTATE_BLOCKING                0x01
-#define PSTATE_LEARNING                0x02
-#define PSTATE_FORWARDING      0x03
-
-/* FDB states */
+#define REG_PORT(p)            (0x10 + (p))
+#define PORT_STATUS            0x00
+#define PORT_STATUS_PAUSE_EN   BIT(15)
+#define PORT_STATUS_MY_PAUSE   BIT(14)
+#define PORT_STATUS_HD_FLOW    BIT(13)
+#define PORT_STATUS_PHY_DETECT BIT(12)
+#define PORT_STATUS_LINK       BIT(11)
+#define PORT_STATUS_DUPLEX     BIT(10)
+#define PORT_STATUS_SPEED_MASK 0x0300
+#define PORT_STATUS_SPEED_10   0x0000
+#define PORT_STATUS_SPEED_100  0x0100
+#define PORT_STATUS_SPEED_1000 0x0200
+#define PORT_STATUS_EEE                BIT(6) /* 6352 */
+#define PORT_STATUS_AM_DIS     BIT(6) /* 6165 */
+#define PORT_STATUS_MGMII      BIT(6) /* 6185 */
+#define PORT_STATUS_TX_PAUSED  BIT(5)
+#define PORT_STATUS_FLOW_CTRL  BIT(4)
+#define PORT_PCS_CTRL          0x01
+#define PORT_SWITCH_ID         0x03
+#define PORT_SWITCH_ID_6085    0x04a0
+#define PORT_SWITCH_ID_6095    0x0950
+#define PORT_SWITCH_ID_6123    0x1210
+#define PORT_SWITCH_ID_6123_A1 0x1212
+#define PORT_SWITCH_ID_6123_A2 0x1213
+#define PORT_SWITCH_ID_6131    0x1060
+#define PORT_SWITCH_ID_6131_B2 0x1066
+#define PORT_SWITCH_ID_6152    0x1a40
+#define PORT_SWITCH_ID_6155    0x1a50
+#define PORT_SWITCH_ID_6161    0x1610
+#define PORT_SWITCH_ID_6161_A1 0x1612
+#define PORT_SWITCH_ID_6161_A2 0x1613
+#define PORT_SWITCH_ID_6165    0x1650
+#define PORT_SWITCH_ID_6165_A1 0x1652
+#define PORT_SWITCH_ID_6165_A2 0x1653
+#define PORT_SWITCH_ID_6171    0x1710
+#define PORT_SWITCH_ID_6172    0x1720
+#define PORT_SWITCH_ID_6176    0x1760
+#define PORT_SWITCH_ID_6182    0x1a60
+#define PORT_SWITCH_ID_6185    0x1a70
+#define PORT_SWITCH_ID_6352    0x3520
+#define PORT_SWITCH_ID_6352_A0 0x3521
+#define PORT_SWITCH_ID_6352_A1 0x3522
+#define PORT_CONTROL           0x04
+#define PORT_CONTROL_STATE_MASK                0x03
+#define PORT_CONTROL_STATE_DISABLED    0x00
+#define PORT_CONTROL_STATE_BLOCKING    0x01
+#define PORT_CONTROL_STATE_LEARNING    0x02
+#define PORT_CONTROL_STATE_FORWARDING  0x03
+#define PORT_CONTROL_1         0x05
+#define PORT_BASE_VLAN         0x06
+#define PORT_DEFAULT_VLAN      0x07
+#define PORT_CONTROL_2         0x08
+#define PORT_RATE_CONTROL      0x09
+#define PORT_RATE_CONTROL_2    0x0a
+#define PORT_ASSOC_VECTOR      0x0b
+#define PORT_IN_DISCARD_LO     0x10
+#define PORT_IN_DISCARD_HI     0x11
+#define PORT_IN_FILTERED       0x12
+#define PORT_OUT_FILTERED      0x13
+#define PORT_TAG_REGMAP_0123   0x19
+#define PORT_TAG_REGMAP_4567   0x1a
 
-#define FDB_STATE_MASK                 0x0f
+#define REG_GLOBAL             0x1b
+#define GLOBAL_STATUS          0x00
+#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
+/* Two bits for 6165, 6185 etc */
+#define GLOBAL_STATUS_PPU_MASK         (0x3 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
+#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED     (0x2 << 14)
+#define GLOBAL_STATUS_PPU_POLLING      (0x3 << 14)
+#define GLOBAL_MAC_01          0x01
+#define GLOBAL_MAC_23          0x02
+#define GLOBAL_MAC_45          0x03
+#define GLOBAL_CONTROL         0x04
+#define GLOBAL_CONTROL_SW_RESET                BIT(15)
+#define GLOBAL_CONTROL_PPU_ENABLE      BIT(14)
+#define GLOBAL_CONTROL_DISCARD_EXCESS  BIT(13) /* 6352 */
+#define GLOBAL_CONTROL_SCHED_PRIO      BIT(11) /* 6152 */
+#define GLOBAL_CONTROL_MAX_FRAME_1632  BIT(10) /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM   BIT(9)  /* 6152 */
+#define GLOBAL_CONTROL_DEVICE_EN       BIT(7)
+#define GLOBAL_CONTROL_STATS_DONE_EN   BIT(6)
+#define GLOBAL_CONTROL_VTU_PROBLEM_EN  BIT(5)
+#define GLOBAL_CONTROL_VTU_DONE_EN     BIT(4)
+#define GLOBAL_CONTROL_ATU_PROBLEM_EN  BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN     BIT(2)
+#define GLOBAL_CONTROL_TCAM_EN         BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN  BIT(0)
+#define GLOBAL_VTU_OP          0x05
+#define GLOBAL_VTU_VID         0x06
+#define GLOBAL_VTU_DATA_0_3    0x07
+#define GLOBAL_VTU_DATA_4_7    0x08
+#define GLOBAL_VTU_DATA_8_11   0x09
+#define GLOBAL_ATU_CONTROL     0x0a
+#define GLOBAL_ATU_OP          0x0b
+#define GLOBAL_ATU_OP_BUSY     BIT(15)
+#define GLOBAL_ATU_OP_NOP              (0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL                ((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB          ((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB      ((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB         ((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_CLR_VIOLATION          ((7 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA                0x0c
+#define GLOBAL_ATU_DATA_STATE_MASK             0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED           0x00
+#define GLOBAL_ATU_DATA_STATE_UC_MGMT          0x0d
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC                0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER     0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE     0x05
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC                0x07
+#define GLOBAL_ATU_DATA_STATE_MC_MGMT          0x0e
+#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER     0x0f
+#define GLOBAL_ATU_MAC_01      0x0d
+#define GLOBAL_ATU_MAC_23      0x0e
+#define GLOBAL_ATU_MAC_45      0x0f
+#define GLOBAL_IP_PRI_0                0x10
+#define GLOBAL_IP_PRI_1                0x11
+#define GLOBAL_IP_PRI_2                0x12
+#define GLOBAL_IP_PRI_3                0x13
+#define GLOBAL_IP_PRI_4                0x14
+#define GLOBAL_IP_PRI_5                0x15
+#define GLOBAL_IP_PRI_6                0x16
+#define GLOBAL_IP_PRI_7                0x17
+#define GLOBAL_IEEE_PRI                0x18
+#define GLOBAL_CORE_TAG_TYPE   0x19
+#define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_CONTROL_2       0x1c
+#define GLOBAL_STATS_OP                0x1d
+#define GLOBAL_STATS_OP_BUSY   BIT(15)
+#define GLOBAL_STATS_OP_NOP            (0 << 12)
+#define GLOBAL_STATS_OP_FLUSH_ALL      ((1 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_FLUSH_PORT     ((2 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_READ_CAPTURED  ((4 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_CAPTURE_PORT   ((5 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX                ((1 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_TX                ((2 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX_TX     ((3 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_COUNTER_32        0x1e
+#define GLOBAL_STATS_COUNTER_01        0x1f
 
-#define FDB_STATE_UNUSED               0x00
-#define FDB_STATE_MC_STATIC            0x07    /* static multicast */
-#define FDB_STATE_STATIC               0x0e    /* static unicast */
+#define REG_GLOBAL2            0x1c
+#define GLOBAL2_INT_SOURCE     0x00
+#define GLOBAL2_INT_MASK       0x01
+#define GLOBAL2_MGMT_EN_2X     0x02
+#define GLOBAL2_MGMT_EN_0X     0x03
+#define GLOBAL2_FLOW_CONTROL   0x04
+#define GLOBAL2_SWITCH_MGMT    0x05
+#define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_TRUNK_MASK     0x07
+#define GLOBAL2_TRUNK_MAPPING  0x08
+#define GLOBAL2_INGRESS_OP     0x09
+#define GLOBAL2_INGRESS_DATA   0x0a
+#define GLOBAL2_PVT_ADDR       0x0b
+#define GLOBAL2_PVT_DATA       0x0c
+#define GLOBAL2_SWITCH_MAC     0x0d
+#define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
+#define GLOBAL2_ATU_STATS      0x0e
+#define GLOBAL2_PRIO_OVERRIDE  0x0f
+#define GLOBAL2_EEPROM_OP      0x14
+#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
+#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_DATA    0x15
+#define GLOBAL2_PTP_AVB_OP     0x16
+#define GLOBAL2_PTP_AVB_DATA   0x17
+#define GLOBAL2_SMI_OP         0x18
+#define GLOBAL2_SMI_OP_BUSY            BIT(15)
+#define GLOBAL2_SMI_OP_CLAUSE_22       BIT(12)
+#define GLOBAL2_SMI_OP_22_WRITE                ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \
+                                        GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_22_READ         ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \
+                                        GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_45_WRITE_ADDR   ((0 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_WRITE_DATA   ((1 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_READ_DATA    ((2 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_DATA       0x19
+#define GLOBAL2_SCRATCH_MISC   0x1a
+#define GLOBAL2_WDOG_CONTROL   0x1b
+#define GLOBAL2_QOS_WEIGHT     0x1c
+#define GLOBAL2_MISC           0x1d
 
 struct mv88e6xxx_priv_state {
        /* When using multi-chip addressing, this mutex protects
@@ -73,6 +239,7 @@ struct mv88e6xxx_priv_state {
        struct mutex eeprom_mutex;
 
        int             id; /* switch product id */
+       int             num_ports;      /* number of switch ports */
 
        /* hw bridging */
 
@@ -92,6 +259,7 @@ struct mv88e6xxx_hw_stat {
        int reg;
 };
 
+int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
 int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
 int mv88e6xxx_setup_common(struct dsa_switch *ds);
 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
@@ -102,19 +270,21 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
 int mv88e6xxx_config_prio(struct dsa_switch *ds);
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val);
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
+int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
+int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
+                                u16 val);
 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
                            int regnum, u16 val);
 void mv88e6xxx_poll_link(struct dsa_switch *ds);
-void mv88e6xxx_get_strings(struct dsa_switch *ds,
-                          int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                          int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
-                                int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                                int port, uint64_t *data);
+void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+                                uint64_t *data);
+int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
+int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
                        struct ethtool_regs *regs, void *_p);
@@ -137,7 +307,9 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
                           const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
                               unsigned char *addr, bool *is_static);
-
+int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
+int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+                            int reg, int val);
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
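
Illustrative sketch, not part of the patch: every GLOBAL_ATU_OP_* encoding above has GLOBAL_ATU_OP_BUSY set, so callers normally poll that bit in REG_GLOBAL/GLOBAL_ATU_OP until the hardware clears it before issuing the next ATU operation. The helper below is a hypothetical example; it assumes a read accessor mirroring the mv88e6xxx_reg_write() declaration above, and the retry count is made up.

static int example_atu_wait(struct dsa_switch *ds)
{
	int i, ret;

	for (i = 0; i < 10; i++) {
		ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_OP);
		if (ret < 0)
			return ret;
		if (!(ret & GLOBAL_ATU_OP_BUSY))
			return 0;	/* previous ATU operation has completed */
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;	/* made-up bound: give up after ~10-20 ms */
}
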
index 80dd7a92f3574b5d09f04d077def087016c136e0..21d9497518fde356f45e6e0eb0632368d680116b 100644 (file)
@@ -853,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
        return 0;
 }
 
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       unsigned int pr_mode, am_mode;
+
+       pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+       am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+       xgbe_set_promiscuous_mode(pdata, pr_mode);
+       xgbe_set_all_multicast_mode(pdata, am_mode);
+
+       xgbe_add_mac_addresses(pdata);
+
+       return 0;
+}
+
 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                              int mmd_reg)
 {
@@ -1101,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
        DBGPR("<--tx_desc_init\n");
 }
 
-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+                              struct xgbe_ring_data *rdata, unsigned int index)
 {
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
+       unsigned int rx_usecs = pdata->rx_usecs;
+       unsigned int rx_frames = pdata->rx_frames;
+       unsigned int inte;
+
+       if (!rx_usecs && !rx_frames) {
+               /* No coalescing, interrupt for every descriptor */
+               inte = 1;
+       } else {
+               /* Set interrupt based on Rx frame coalescing setting */
+               if (rx_frames && !((index + 1) % rx_frames))
+                       inte = 1;
+               else
+                       inte = 0;
+       }
 
        /* Reset the Rx descriptor
         *   Set buffer 1 (lo) address to header dma address (lo)
@@ -1117,8 +1148,7 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
        rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
        rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
 
-       XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-                         rdata->interrupt ? 1 : 0);
+       XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
        /* Since the Rx DMA engine is likely running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
@@ -1138,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;
-       unsigned int rx_coalesce, rx_frames;
        unsigned int i;
 
        DBGPR("-->rx_desc_init\n");
 
-       rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
-       rx_frames = pdata->rx_frames;
-
        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);
 
-               /* Set interrupt on completion bit as appropriate */
-               if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
-                       rdata->interrupt = 0;
-               else
-                       rdata->interrupt = 1;
-
                /* Initialize Rx descriptor */
-               xgbe_rx_desc_reset(rdata);
+               xgbe_rx_desc_reset(pdata, rdata, i);
        }
 
        /* Update the total number of Rx descriptors */
@@ -2804,6 +2824,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
         * Initialize MAC related features
         */
        xgbe_config_mac_address(pdata);
+       xgbe_config_rx_mode(pdata);
        xgbe_config_jumbo_enable(pdata);
        xgbe_config_flow_control(pdata);
        xgbe_config_mac_speed(pdata);
@@ -2823,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
        hw_if->tx_complete = xgbe_tx_complete;
 
-       hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
-       hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-       hw_if->add_mac_addresses = xgbe_add_mac_addresses;
        hw_if->set_mac_address = xgbe_set_mac_address;
+       hw_if->config_rx_mode = xgbe_config_rx_mode;
 
        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
        hw_if->disable_rx_csum = xgbe_disable_rx_csum;
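
Illustrative sketch, not part of the patch: the per-descriptor interrupt decision that xgbe_rx_desc_reset() now makes reduces to the helper below. With rx_frames = 4, for example, the INTE bit is set only on descriptor indices 3, 7, 11, ..., so roughly one Rx interrupt is requested per four frames; a configured rx_usecs watchdog can still raise one sooner.

static unsigned int example_rx_inte(unsigned int index, unsigned int rx_usecs,
				    unsigned int rx_frames)
{
	/* No coalescing configured: interrupt on every descriptor */
	if (!rx_usecs && !rx_frames)
		return 1;

	/* Otherwise interrupt only on every rx_frames-th descriptor */
	return (rx_frames && !((index + 1) % rx_frames)) ? 1 : 0;
}
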
index 347fe2419a18a0514b7ea3f29350a0c1e33deae4..db84ddcfec8464191a3edcccfd87c869ac1c5a7c 100644 (file)
 
 static int xgbe_one_poll(struct napi_struct *, int);
 static int xgbe_all_poll(struct napi_struct *, int);
-static void xgbe_set_rx_mode(struct net_device *);
 
 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
 {
@@ -952,8 +951,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        DBGPR("-->xgbe_start\n");
 
-       xgbe_set_rx_mode(netdev);
-
        hw_if->init(pdata);
 
        phy_start(pdata->phydev);
@@ -1533,17 +1530,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int pr_mode, am_mode;
 
        DBGPR("-->xgbe_set_rx_mode\n");
 
-       pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
-       am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
-
-       hw_if->set_promiscuous_mode(pdata, pr_mode);
-       hw_if->set_all_multicast_mode(pdata, am_mode);
-
-       hw_if->add_mac_addresses(pdata);
+       hw_if->config_rx_mode(pdata);
 
        DBGPR("<--xgbe_set_rx_mode\n");
 }
@@ -1610,6 +1600,14 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
        return 0;
 }
 
+static void xgbe_tx_timeout(struct net_device *netdev)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+       netdev_warn(netdev, "tx timeout, device restarting\n");
+       schedule_work(&pdata->restart_work);
+}
+
 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
                                                  struct rtnl_link_stats64 *s)
 {
@@ -1774,6 +1772,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = xgbe_ioctl,
        .ndo_change_mtu         = xgbe_change_mtu,
+       .ndo_tx_timeout         = xgbe_tx_timeout,
        .ndo_get_stats64        = xgbe_get_stats64,
        .ndo_vlan_rx_add_vid    = xgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = xgbe_vlan_rx_kill_vid,
@@ -1806,7 +1805,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
                if (desc_if->map_rx_buffer(pdata, ring, rdata))
                        break;
 
-               hw_if->rx_desc_reset(rdata);
+               hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
 
                ring->dirty++;
        }
index b4f6eaaa08f0732211435c011e87e518c63cfbad..5f149e8ee20f0fa1a0878c37f2d44427fbe75f7f 100644 (file)
@@ -424,16 +424,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
            (ec->rate_sample_interval))
                return -EOPNOTSUPP;
 
-       /* Can only change rx-frames when interface is down (see
-        * rx_descriptor_init in xgbe-dev.c)
-        */
-       rx_frames = pdata->rx_frames;
-       if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
-               netdev_alert(netdev,
-                            "interface must be down to change rx-frames\n");
-               return -EINVAL;
-       }
-
        rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
        rx_usecs = ec->rx_coalesce_usecs;
        rx_frames = ec->rx_max_coalesced_frames;
index 2e4c22d94a6bef50e0606b9fce861c29f88c24d4..7149053849008de10da3be7eb054884b4a808f8c 100644 (file)
@@ -491,6 +491,9 @@ static int xgbe_probe(struct platform_device *pdev)
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
 
+       /* Use default watchdog timeout */
+       netdev->watchdog_timeo = 0;
+
        xgbe_init_rx_coalesce(pdata);
        xgbe_init_tx_coalesce(pdata);
 
index dd742426eb0425e9c4a7b041ee4cf3764694515e..e62dfa2deab67565cbb6c62116f4f8b6b722384f 100644 (file)
@@ -325,8 +325,6 @@ struct xgbe_ring_data {
        struct xgbe_tx_ring_data tx;    /* Tx-related data */
        struct xgbe_rx_ring_data rx;    /* Rx-related data */
 
-       unsigned int interrupt;         /* Interrupt indicator */
-
        unsigned int mapped_as_page;
 
        /* Incomplete receive save location.  If the budget is exhausted
@@ -497,10 +495,8 @@ struct xgbe_mmc_stats {
 struct xgbe_hw_if {
        int (*tx_complete)(struct xgbe_ring_desc *);
 
-       int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
-       int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
-       int (*add_mac_addresses)(struct xgbe_prv_data *);
        int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+       int (*config_rx_mode)(struct xgbe_prv_data *);
 
        int (*enable_rx_csum)(struct xgbe_prv_data *);
        int (*disable_rx_csum)(struct xgbe_prv_data *);
@@ -536,8 +532,9 @@ struct xgbe_hw_if {
        int (*dev_read)(struct xgbe_channel *);
        void (*tx_desc_init)(struct xgbe_channel *);
        void (*rx_desc_init)(struct xgbe_channel *);
-       void (*rx_desc_reset)(struct xgbe_ring_data *);
        void (*tx_desc_reset)(struct xgbe_ring_data *);
+       void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
+                             unsigned int);
        int (*is_last_desc)(struct xgbe_ring_desc *);
        int (*is_context_desc)(struct xgbe_ring_desc *);
        void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
index 756053c028becff93c62cfcdbd6e1f635594cc01..4085c4b310470b6ebf718121fa423d2995dcb8ef 100644 (file)
@@ -1811,7 +1811,7 @@ struct bnx2x {
        int                     stats_state;
 
        /* used for synchronization of concurrent threads statistics handling */
-       spinlock_t              stats_lock;
+       struct mutex            stats_lock;
 
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
 
        int fp_array_size;
        u32 dump_preset_idx;
-       bool                                    stats_started;
-       struct semaphore                        stats_sema;
 
        u8                                      phys_port_id[ETH_ALEN];
 
index 177cb0e722e79af7dd76c119d58ae2912d98867e..b9f85fccb419be528ae328efc3af4303f0498103 100644 (file)
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
        u32 xmac_val;
        u32 emac_addr;
        u32 emac_val;
-       u32 umac_addr;
-       u32 umac_val;
+       u32 umac_addr[2];
+       u32 umac_val[2];
        u32 bmac_addr;
        u32 bmac_val[2];
 };
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
        return 0;
 }
 
+/* A previous driver's DMAE transaction may have occurred when the pre-boot
+ * stage ended and boot began, or when the kdump kernel was loaded. Either
+ * case would invalidate the addresses of the transaction, resulting in the
+ * was-error bit being set in the PCI block and causing all HW-to-host PCIe
+ * transactions to time out. If this happened, we want to clear the interrupt
+ * which detected this from the pglueb, and the was-done bit.
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+       if (!CHIP_IS_E1x(bp))
+               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+                      1 << BP_ABS_FUNC(bp));
+}
+
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
        bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 
-       if (!CHIP_IS_E1x(bp))
-               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+       bnx2x_clean_pglue_errors(bp);
 
        bnx2x_init_block(bp, BLOCK_ATC, init_phase);
        bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
        return base + (BP_ABS_FUNC(bp)) * stride;
 }
 
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+                                        u8 port, u32 reset_reg,
+                                        struct bnx2x_mac_vals *vals)
+{
+       u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+       u32 base_addr;
+
+       if (!(mask & reset_reg))
+               return false;
+
+       BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+       base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+       vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+       vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+       REG_WR(bp, vals->umac_addr[port], 0);
+
+       return true;
+}
+
 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                                        struct bnx2x_mac_vals *vals)
 {
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
        u8 port = BP_PORT(bp);
 
        /* reset addresses as they also mark which values were changed */
-       vals->bmac_addr = 0;
-       vals->umac_addr = 0;
-       vals->xmac_addr = 0;
-       vals->emac_addr = 0;
+       memset(vals, 0, sizeof(*vals));
 
        reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                        REG_WR(bp, vals->xmac_addr, 0);
                        mac_stopped = true;
                }
-               mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
-               if (mask & reset_reg) {
-                       BNX2X_DEV_INFO("Disable umac Rx\n");
-                       base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-                       vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
-                       vals->umac_val = REG_RD(bp, vals->umac_addr);
-                       REG_WR(bp, vals->umac_addr, 0);
-                       mac_stopped = true;
-               }
+
+               mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+                                                           reset_reg, vals);
+               mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+                                                           reset_reg, vals);
        }
 
        if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
 
-               /* close LLH filters towards the BRB */
+               /* close LLH filters for both ports towards the BRB */
                bnx2x_set_rx_filter(&bp->link_params, 0);
+               bp->link_params.port ^= 1;
+               bnx2x_set_rx_filter(&bp->link_params, 0);
+               bp->link_params.port ^= 1;
 
                /* Check if the UNDI driver was previously loaded */
                if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 
        if (mac_vals.xmac_addr)
                REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
-       if (mac_vals.umac_addr)
-               REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+       if (mac_vals.umac_addr[0])
+               REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+       if (mac_vals.umac_addr[1])
+               REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
        if (mac_vals.emac_addr)
                REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
        if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        return bnx2x_prev_mcp_done(bp);
 }
 
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
-       if (!CHIP_IS_E1x(bp)) {
-               u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-               if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-                       DP(BNX2X_MSG_SP,
-                          "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
-                       REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
-                              1 << BP_FUNC(bp));
-               }
-       }
-}
-
 static int bnx2x_prev_unload(struct bnx2x *bp)
 {
        int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
        /* clear hw from errors which may have resulted from an interrupted
         * dmae transaction.
         */
-       bnx2x_prev_interrupted_dmae(bp);
+       bnx2x_clean_pglue_errors(bp);
 
        /* Release previously held locks */
        hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12044,9 +12054,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        mutex_init(&bp->drv_info_mutex);
+       mutex_init(&bp->stats_lock);
        bp->drv_info_mng_owner = false;
-       spin_lock_init(&bp->stats_lock);
-       sema_init(&bp->stats_sema, 1);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -13673,9 +13682,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        cancel_delayed_work_sync(&bp->sp_task);
        cancel_delayed_work_sync(&bp->period_task);
 
-       spin_lock_bh(&bp->stats_lock);
+       mutex_lock(&bp->stats_lock);
        bp->stats_state = STATS_STATE_DISABLED;
-       spin_unlock_bh(&bp->stats_lock);
+       mutex_unlock(&bp->stats_lock);
 
        bnx2x_save_statistics(bp);
 
index 8638d6c97caa4e615ccf2bd3083d837a0eb870c0..d95f7b4e19e16c2e26bf23392a0d2f5275d5c474 100644 (file)
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
                cookie.vf = vf;
                cookie.state = VF_ACQUIRED;
-               bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+               rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+               if (rc)
+                       goto op_err;
        }
 
        DP(BNX2X_MSG_IOV, "set state to acquired\n");
index 612cafb5df5387f060b370d7effbb09e671a398f..266b055c2360af759c7f78395636d541210e5b9d 100644 (file)
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
  */
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
-       if (!bp->stats_pending) {
-               int rc;
+       int rc;
 
-               spin_lock_bh(&bp->stats_lock);
-
-               if (bp->stats_pending) {
-                       spin_unlock_bh(&bp->stats_lock);
-                       return;
-               }
-
-               bp->fw_stats_req->hdr.drv_stats_counter =
-                       cpu_to_le16(bp->stats_counter++);
+       if (bp->stats_pending)
+               return;
 
-               DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
-                  le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+       bp->fw_stats_req->hdr.drv_stats_counter =
+               cpu_to_le16(bp->stats_counter++);
 
-               /* adjust the ramrod to include VF queues statistics */
-               bnx2x_iov_adjust_stats_req(bp);
-               bnx2x_dp_stats(bp);
+       DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+          le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
 
-               /* send FW stats ramrod */
-               rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
-                                  U64_HI(bp->fw_stats_req_mapping),
-                                  U64_LO(bp->fw_stats_req_mapping),
-                                  NONE_CONNECTION_TYPE);
-               if (rc == 0)
-                       bp->stats_pending = 1;
+       /* adjust the ramrod to include VF queues statistics */
+       bnx2x_iov_adjust_stats_req(bp);
+       bnx2x_dp_stats(bp);
 
-               spin_unlock_bh(&bp->stats_lock);
-       }
+       /* send FW stats ramrod */
+       rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+                          U64_HI(bp->fw_stats_req_mapping),
+                          U64_LO(bp->fw_stats_req_mapping),
+                          NONE_CONNECTION_TYPE);
+       if (rc == 0)
+               bp->stats_pending = 1;
 }
 
 static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
  */
 
 /* should be called under stats_sema */
-static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
        struct dmae_command *dmae;
        u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 }
 
 /* should be called under stats_sema */
-static void __bnx2x_stats_start(struct bnx2x *bp)
+static void bnx2x_stats_start(struct bnx2x *bp)
 {
        if (IS_PF(bp)) {
                if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
                bnx2x_hw_stats_post(bp);
                bnx2x_storm_stats_post(bp);
        }
-
-       bp->stats_started = true;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
        bnx2x_stats_comp(bp);
-       __bnx2x_stats_pmf_update(bp);
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
-}
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
-       __bnx2x_stats_pmf_update(bp);
-       up(&bp->stats_sema);
+       bnx2x_stats_pmf_update(bp);
+       bnx2x_stats_start(bp);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
         */
        if (IS_VF(bp))
                return;
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
+
        bnx2x_stats_comp(bp);
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
+       bnx2x_stats_start(bp);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-       /* we run update from timer context, so give up
-        * if somebody is in the middle of transition
-        */
-       if (down_trylock(&bp->stats_sema))
+       if (bnx2x_edebug_stats_stopped(bp))
                return;
 
-       if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
-               goto out;
-
        if (IS_PF(bp)) {
                if (*stats_comp != DMAE_COMP_VAL)
-                       goto out;
+                       return;
 
                if (bp->port.pmf)
                        bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
                                BNX2X_ERR("storm stats were not updated for 3 times\n");
                                bnx2x_panic();
                        }
-                       goto out;
+                       return;
                }
        } else {
                /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        /* vf is done */
        if (IS_VF(bp))
-               goto out;
+               return;
 
        if (netif_msg_timer(bp)) {
                struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
-
-out:
-       up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
 
 static void bnx2x_stats_stop(struct bnx2x *bp)
 {
-       int update = 0;
-
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
-
-       bp->stats_started = false;
+       bool update = false;
 
        bnx2x_stats_comp(bp);
 
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
                bnx2x_hw_stats_post(bp);
                bnx2x_stats_comp(bp);
        }
-
-       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
 
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
-       enum bnx2x_stats_state state;
-       void (*action)(struct bnx2x *bp);
+       enum bnx2x_stats_state state = bp->stats_state;
+
        if (unlikely(bp->panic))
                return;
 
-       spin_lock_bh(&bp->stats_lock);
-       state = bp->stats_state;
+       /* Statistics updates run from timer context, and we don't want to
+        * stall that context in case someone is in the middle of a transition.
+        * For other events, wait until the lock can be taken.
+        */
+       if (!mutex_trylock(&bp->stats_lock)) {
+               if (event == STATS_EVENT_UPDATE)
+                       return;
+
+               DP(BNX2X_MSG_STATS,
+                  "Unlikely stats' lock contention [event %d]\n", event);
+               mutex_lock(&bp->stats_lock);
+       }
+
+       bnx2x_stats_stm[state][event].action(bp);
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
-       action = bnx2x_stats_stm[state][event].action;
-       spin_unlock_bh(&bp->stats_lock);
 
-       action(bp);
+       mutex_unlock(&bp->stats_lock);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
        }
 }
 
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-                          void (func_to_exec)(void *cookie),
-                          void *cookie){
-       if (down_timeout(&bp->stats_sema, HZ/10))
-               BNX2X_ERR("Unable to acquire stats lock\n");
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+                         void (func_to_exec)(void *cookie),
+                         void *cookie)
+{
+       int cnt = 10, rc = 0;
+
+       /* Wait for any outstanding statistics to complete [while blocking
+        * further requests], then run the supplied function 'safely'.
+        */
+       mutex_lock(&bp->stats_lock);
+
        bnx2x_stats_comp(bp);
+       while (bp->stats_pending && cnt--)
+               if (bnx2x_storm_stats_update(bp))
+                       usleep_range(1000, 2000);
+       if (bp->stats_pending) {
+               BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+               rc = -EBUSY;
+               goto out;
+       }
+
        func_to_exec(cookie);
-       __bnx2x_stats_start(bp);
-       up(&bp->stats_sema);
+
+out:
+       /* No need to restart statistics - if they're enabled, the timer
+        * will restart the statistics.
+        */
+       mutex_unlock(&bp->stats_lock);
+
+       return rc;
 }
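
Illustrative sketch with assumed names, not part of the patch: the bp->stats_lock handling above boils down to the pattern below, where the periodic STATS_EVENT_UPDATE path skips a contended lock instead of waiting behind a transition in progress, while every other caller blocks until it owns the mutex.

static bool example_stats_lock(struct mutex *lock, bool is_periodic_update)
{
	if (!mutex_trylock(lock)) {
		/* The periodic update should not stall behind a transition;
		 * simply skip this round and try again on the next tick.
		 */
		if (is_periodic_update)
			return false;

		/* All other events can afford to wait for the lock */
		mutex_lock(lock);
	}

	return true;	/* caller is responsible for mutex_unlock() */
}
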
index 2beceaefdeea7aa5ac53f6a3028cd0fbcacbc51a..965539a9dabe7e4702e1ba6b382aefc69fb26fac 100644 (file)
@@ -539,9 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-                          void (func_to_exec)(void *cookie),
-                          void *cookie);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+                         void (func_to_exec)(void *cookie),
+                         void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
index f7855a61e7ad89a2398faa0bf25b5a068bac553e..6043734ea613bdae8d1a8c0abe7f14719e3a8cbd 100644 (file)
@@ -1734,6 +1734,9 @@ static int init_umac(struct bcmgenet_priv *priv)
        } else if (priv->ext_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+                       int0_enable |= UMAC_IRQ_LINK_EVENT;
+
                reg = bcmgenet_bp_mc_get(priv);
                reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -2926,7 +2929,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
                .rdma_offset = 0x10000,
                .tdma_offset = 0x11000,
                .words_per_bd = 2,
-               .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+               .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+                        GENET_HAS_MOCA_LINK_DET,
        },
        [GENET_V4] = {
                .tx_queues = 4,
@@ -2944,7 +2948,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
                .rdma_offset = 0x2000,
                .tdma_offset = 0x4000,
                .words_per_bd = 3,
-               .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+               .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+                        GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
        },
 };
 
index ddaa40cb0f21d16ac553ee1402cb13f75eaebb16..6f2887a5e0be693d625b6328349a2ad3b66d19ba 100644 (file)
@@ -508,6 +508,7 @@ enum bcmgenet_version {
 #define GENET_HAS_40BITS       (1 << 0)
 #define GENET_HAS_EXT          (1 << 1)
 #define GENET_HAS_MDIO_INTR    (1 << 2)
+#define GENET_HAS_MOCA_LINK_DET        (1 << 3)
 
 /* BCMGENET hardware parameters, keep this structure nicely aligned
  * since it is going to be used in hot paths
index 6d3b66a103cced846f6fc21b3ce288671a5a2e0d..e7651b3c6c5767f7609115ef0430c13aac8d17a9 100644 (file)
@@ -462,6 +462,15 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
        return 0;
 }
 
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+                                         struct fixed_phy_status *status)
+{
+       if (dev && dev->phydev && status)
+               status->link = dev->phydev->link;
+
+       return 0;
+}
+
 static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -513,6 +522,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
                        dev_err(kdev, "failed to register fixed PHY device\n");
                        return -ENODEV;
                }
+
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
+                       ret = fixed_phy_set_link_update(
+                               phydev, bcmgenet_fixed_phy_link_update);
+                       if (!ret)
+                               phydev->link = 0;
+               }
        }
 
        priv->phydev = phydev;
index 448a32309dd08c79c99bca7692fea10d429c1b41..9f5387249f242374437581e6c2df7c037917f83a 100644 (file)
@@ -1956,12 +1956,12 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
                            hwstat->rx_oversize_pkts +
                            hwstat->rx_jabbers +
                            hwstat->rx_undersize_pkts +
-                           hwstat->sqe_test_errors +
                            hwstat->rx_length_mismatch);
        nstat->tx_errors = (hwstat->tx_late_cols +
                            hwstat->tx_excessive_cols +
                            hwstat->tx_underruns +
-                           hwstat->tx_carrier_errors);
+                           hwstat->tx_carrier_errors +
+                           hwstat->sqe_test_errors);
        nstat->collisions = (hwstat->tx_single_cols +
                             hwstat->tx_multiple_cols +
                             hwstat->tx_excessive_cols);
index 186566bfdbc8c579ada4136f0d71a9242e9b7629..f5f1b0b51ebd225c4d82967391e5dedda2a7848f 100644 (file)
@@ -354,7 +354,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
        adapter->msg_enable = val;
 }
 
-static char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
index db76f70404551c84b924f560957ca740975eebf2..b96e4bfcac41a8086d5fbb45bf508bd5175072ee 100644 (file)
@@ -1537,7 +1537,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
        adapter->msg_enable = val;
 }
 
-static char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
index d6aa602f168d776cd1450fb10c8fdeb6c0118ae3..e4b5b057f41786733ea9883de572702aeec78e68 100644 (file)
@@ -422,7 +422,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 
        d->addr_lo = cpu_to_be32(mapping);
        d->addr_hi = cpu_to_be32((u64) mapping >> 32);
-       wmb();
+       dma_wmb();
        d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
        d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
        return 0;
@@ -433,7 +433,7 @@ static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
 {
        d->addr_lo = cpu_to_be32(mapping);
        d->addr_hi = cpu_to_be32((u64) mapping >> 32);
-       wmb();
+       dma_wmb();
        d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
        d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
        return 0;
@@ -579,7 +579,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
        q->sdesc[q->pidx] = q->sdesc[idx];
        to->addr_lo = from->addr_lo;    /* already big endian */
        to->addr_hi = from->addr_hi;    /* likewise */
-       wmb();
+       dma_wmb();
        to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
        to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
 
@@ -1068,7 +1068,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
                sd->eop = 1;
                wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
                                   V_WR_SGLSFLT(flits)) | wr_hi;
-               wmb();
+               dma_wmb();
                wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
                                   V_WR_GEN(gen)) | wr_lo;
                wr_gen2(d, gen);
@@ -1114,7 +1114,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
                }
                sd->eop = 1;
                wrp->wr_hi |= htonl(F_WR_EOP);
-               wmb();
+               dma_wmb();
                wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
                wr_gen2((struct tx_desc *)wp, ogen);
                WARN_ON(ndesc != 0);
@@ -1184,7 +1184,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                        cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
                                              V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
                                              | F_WR_SOP | F_WR_EOP | compl);
-                       wmb();
+                       dma_wmb();
                        cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
                                              V_WR_TID(q->token));
                        wr_gen2(d, gen);
@@ -1342,7 +1342,7 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
 
        to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
                                        V_WR_BCNTLFLT(len & 7));
-       wmb();
+       dma_wmb();
        to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
                                        V_WR_LEN((len + 7) / 8));
        wr_gen2(d, gen);
@@ -2271,7 +2271,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                u32 len, flags;
                __be32 rss_hi, rss_lo;
 
-               rmb();
+               dma_rmb();
                eth = r->rss_hdr.opcode == CPL_RX_PKT;
                rss_hi = *(const __be32 *)r;
                rss_lo = r->rss_hdr.rss_hash_val;
@@ -2488,7 +2488,7 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
                }
                if (!is_new_response(r, q))
                        break;
-               rmb();
+               dma_rmb();
        } while (is_pure_response(r));
 
        if (sleeping)
@@ -2523,7 +2523,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
 
        if (!is_new_response(r, q))
                return -1;
-       rmb();
+       dma_rmb();
        if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
                t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
                             V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
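
Illustrative sketch, not part of the patch: the wmb()/rmb() to dma_wmb()/dma_rmb() conversions above rely on the lighter barriers being sufficient for ordering accesses to coherent descriptor memory; a typical publish sequence under that assumption looks like the example below (example_desc and its fields are invented for illustration).

struct example_desc {
	__be32 addr_lo;
	__be32 addr_hi;
	__be32 len_gen;		/* carries the generation bit the hardware polls */
};

static void example_publish_desc(struct example_desc *d, dma_addr_t mapping,
				 u32 len_gen)
{
	d->addr_lo = cpu_to_be32(lower_32_bits(mapping));
	d->addr_hi = cpu_to_be32(upper_32_bits(mapping));

	/* Order the address writes before the generation bit becomes visible;
	 * dma_wmb() suffices because only coherent memory is involved here.
	 */
	dma_wmb();

	d->len_gen = cpu_to_be32(len_gen);
}
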
index 07d9b68a4da20146f14cbc1aa418fed1dcbdabb7..ace0ab98d0f1dbc9a7534242bc0c05d860df50ef 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
index bf46ca935e2af8c08353870ca5ce0be4344eb1af..524d11098c566d178d132fe0b3a05be557036440 100644 (file)
@@ -59,6 +59,11 @@ enum {
        PN_LEN     = 16,    /* Part Number length */
 };
 
+enum {
+       T4_REGMAP_SIZE = (160 * 1024),
+       T5_REGMAP_SIZE = (332 * 1024),
+};
+
 enum {
        MEM_EDC0,
        MEM_EDC1,
@@ -373,11 +378,20 @@ enum {
        MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
 };
 
+enum {
+       MAX_TXQ_ENTRIES      = 16384,
+       MAX_CTRL_TXQ_ENTRIES = 1024,
+       MAX_RSPQ_ENTRIES     = 16384,
+       MAX_RX_BUFFERS       = 16384,
+       MIN_TXQ_ENTRIES      = 32,
+       MIN_CTRL_TXQ_ENTRIES = 32,
+       MIN_RSPQ_ENTRIES     = 128,
+       MIN_FL_ENTRIES       = 16
+};
+
 enum {
        INGQ_EXTRAS = 2,        /* firmware event queue and */
                                /*   forwarded interrupts */
-       MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
-                  + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
        MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
                   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
 };
@@ -623,11 +637,13 @@ struct sge {
        unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
 
        unsigned int egr_start;
+       unsigned int egr_sz;
        unsigned int ingr_start;
-       void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
-       struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
-       DECLARE_BITMAP(starving_fl, MAX_EGRQ);
-       DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+       unsigned int ingr_sz;
+       void **egr_map;    /* qid->queue egress queue map */
+       struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
+       unsigned long *starving_fl;
+       unsigned long *txq_maperr;
        struct timer_list rx_timer; /* refills starving FLs */
        struct timer_list tx_timer; /* checks Tx queues */
 };
@@ -1000,6 +1016,30 @@ static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
 }
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
+/* Return a version number to identify the type of adapter.  The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ * - bits 16..23: register dump version
+ */
+static inline unsigned int mk_adap_vers(struct adapter *ap)
+{
+       return CHELSIO_CHIP_VERSION(ap->params.chip) |
+               (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
+}
+
+/* Return a queue's interrupt hold-off time in us.  0 means no timer. */
+static inline unsigned int qtimer_val(const struct adapter *adap,
+                                     const struct sge_rspq *q)
+{
+       unsigned int idx = q->intr_params >> 1;
+
+       return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
+
+/* driver version & name used for ethtool_drvinfo */
+extern char cxgb4_driver_name[];
+extern const char cxgb4_driver_version[];
+
 void t4_os_portmod_changed(const struct adapter *adap, int port_id);
 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
 
@@ -1029,6 +1069,10 @@ int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
 int cxgb_busy_poll(struct napi_struct *napi);
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+                              unsigned int cnt);
+void cxgb4_set_ethtool_ops(struct net_device *netdev);
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
 extern int dbfifo_int_thresh;
 
 #define for_each_port(adapter, iter) \
@@ -1117,6 +1161,9 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
        return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
 }
 
+unsigned int t4_get_regs_len(struct adapter *adapter);
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
@@ -1143,6 +1190,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
 
 unsigned int qtimer_val(const struct adapter *adap,
                        const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
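
Illustrative sketch, not part of the patch: decoding the value produced by mk_adap_vers() above, following the bit layout described in its comment (the helper name is made up).

static inline void example_decode_adap_vers(unsigned int vers,
					    unsigned int *chip_ver,
					    unsigned int *chip_rev,
					    unsigned int *dump_ver)
{
	*chip_ver = vers & 0x3ff;		/* bits 0..9   */
	*chip_rev = (vers >> 10) & 0x3f;	/* bits 10..15 */
	*dump_ver = (vers >> 16) & 0xff;	/* bits 16..23 */
}
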
index 0918c16bb1548e76f97d450fefaefaa2dfb8fd23..f0285bcbe5981e1202071d536080d774566cb20e 100644 (file)
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
                "0.9375" };
 
        int i;
-       u16 incr[NMTUS][NCCTRL_WIN];
+       u16 (*incr)[NCCTRL_WIN];
        struct adapter *adap = seq->private;
 
+       incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+       if (!incr)
+               return -ENOMEM;
+
        t4_read_cong_tbl(adap, incr);
 
        for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
                           adap->params.a_wnd[i],
                           dec_fac[adap->params.b_wnd[i]]);
        }
+
+       kfree(incr);
        return 0;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
new file mode 100644 (file)
index 0000000..10d82b5
--- /dev/null
@@ -0,0 +1,915 @@
+/*
+ *  Copyright (C) 2013-2015 Chelsio Communications.  All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mdio.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static u32 get_msglevel(struct net_device *dev)
+{
+       return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+       netdev2adap(dev)->msg_enable = val;
+}
+
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+       "TxOctetsOK         ",
+       "TxFramesOK         ",
+       "TxBroadcastFrames  ",
+       "TxMulticastFrames  ",
+       "TxUnicastFrames    ",
+       "TxErrorFrames      ",
+
+       "TxFrames64         ",
+       "TxFrames65To127    ",
+       "TxFrames128To255   ",
+       "TxFrames256To511   ",
+       "TxFrames512To1023  ",
+       "TxFrames1024To1518 ",
+       "TxFrames1519ToMax  ",
+
+       "TxFramesDropped    ",
+       "TxPauseFrames      ",
+       "TxPPP0Frames       ",
+       "TxPPP1Frames       ",
+       "TxPPP2Frames       ",
+       "TxPPP3Frames       ",
+       "TxPPP4Frames       ",
+       "TxPPP5Frames       ",
+       "TxPPP6Frames       ",
+       "TxPPP7Frames       ",
+
+       "RxOctetsOK         ",
+       "RxFramesOK         ",
+       "RxBroadcastFrames  ",
+       "RxMulticastFrames  ",
+       "RxUnicastFrames    ",
+
+       "RxFramesTooLong    ",
+       "RxJabberErrors     ",
+       "RxFCSErrors        ",
+       "RxLengthErrors     ",
+       "RxSymbolErrors     ",
+       "RxRuntFrames       ",
+
+       "RxFrames64         ",
+       "RxFrames65To127    ",
+       "RxFrames128To255   ",
+       "RxFrames256To511   ",
+       "RxFrames512To1023  ",
+       "RxFrames1024To1518 ",
+       "RxFrames1519ToMax  ",
+
+       "RxPauseFrames      ",
+       "RxPPP0Frames       ",
+       "RxPPP1Frames       ",
+       "RxPPP2Frames       ",
+       "RxPPP3Frames       ",
+       "RxPPP4Frames       ",
+       "RxPPP5Frames       ",
+       "RxPPP6Frames       ",
+       "RxPPP7Frames       ",
+
+       "RxBG0FramesDropped ",
+       "RxBG1FramesDropped ",
+       "RxBG2FramesDropped ",
+       "RxBG3FramesDropped ",
+       "RxBG0FramesTrunc   ",
+       "RxBG1FramesTrunc   ",
+       "RxBG2FramesTrunc   ",
+       "RxBG3FramesTrunc   ",
+
+       "TSO                ",
+       "TxCsumOffload      ",
+       "RxCsumGood         ",
+       "VLANextractions    ",
+       "VLANinsertions     ",
+       "GROpackets         ",
+       "GROmerged          ",
+       "WriteCoalSuccess   ",
+       "WriteCoalFail      ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(stats_strings);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+       struct adapter *adap = netdev2adap(dev);
+
+       return t4_get_regs_len(adap);
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+       return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct adapter *adapter = netdev2adap(dev);
+       u32 exprom_vers;
+
+       strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+       strlcpy(info->version, cxgb4_driver_version,
+               sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
+
+       if (adapter->params.fw_vers)
+               snprintf(info->fw_version, sizeof(info->fw_version),
+                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
+                        FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+                        FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+                        FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+                        FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+       if (!t4_get_exprom_version(adapter, &exprom_vers))
+               snprintf(info->erom_version, sizeof(info->erom_version),
+                        "%u.%u.%u.%u",
+                        FW_HDR_FW_VER_MAJOR_G(exprom_vers),
+                        FW_HDR_FW_VER_MINOR_G(exprom_vers),
+                        FW_HDR_FW_VER_MICRO_G(exprom_vers),
+                        FW_HDR_FW_VER_BUILD_G(exprom_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+/* port stats maintained per queue of the port. They should be in the same
+ * order as in stats_strings above.
+ */
+struct queue_port_stats {
+       u64 tso;
+       u64 tx_csum;
+       u64 rx_csum;
+       u64 vlan_ex;
+       u64 vlan_ins;
+       u64 gro_pkts;
+       u64 gro_merged;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+                                  const struct port_info *p,
+                                  struct queue_port_stats *s)
+{
+       int i;
+       const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+       const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+       memset(s, 0, sizeof(*s));
+       for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+               s->tso += tx->tso;
+               s->tx_csum += tx->tx_cso;
+               s->rx_csum += rx->stats.rx_cso;
+               s->vlan_ex += rx->stats.vlan_ex;
+               s->vlan_ins += tx->vlan_ins;
+               s->gro_pkts += rx->stats.lro_pkts;
+               s->gro_merged += rx->stats.lro_merged;
+       }
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+                     u64 *data)
+{
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adapter = pi->adapter;
+       u32 val1, val2;
+
+       t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+
+       data += sizeof(struct port_stats) / sizeof(u64);
+       collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+       data += sizeof(struct queue_port_stats) / sizeof(u64);
+       if (!is_t4(adapter->params.chip)) {
+               t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+               val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
+               *data = val1 - val2;
+               data++;
+               *data = val2;
+               data++;
+       } else {
+               memset(data, 0, 2 * sizeof(u64));
+               data += 2;      /* T4 has no write-coalescing stats; leave both slots zero */
+       }
+}
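
get_stats() fills the ethtool buffer purely by position: all of struct port_stats, then struct queue_port_stats, then the two T5-only write-coalescing counters, one u64 per entry of stats_strings. A minimal compile-time guard for that invariant could look like the sketch below (not part of this patch; BUILD_BUG_ON and ARRAY_SIZE are the standard kernel macros):

	BUILD_BUG_ON(sizeof(struct port_stats) / sizeof(u64) +
		     sizeof(struct queue_port_stats) / sizeof(u64) + 2 !=
		     ARRAY_SIZE(stats_strings));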
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                    void *buf)
+{
+       struct adapter *adap = netdev2adap(dev);
+       size_t buf_size;
+
+       buf_size = t4_get_regs_len(adap);
+       regs->version = mk_adap_vers(adap);
+       t4_get_regs(adap, buf, buf_size);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+       struct port_info *p = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EAGAIN;
+       if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+               return -EINVAL;
+       t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+       return 0;
+}
+
+static int identify_port(struct net_device *dev,
+                        enum ethtool_phys_id_state state)
+{
+       unsigned int val;
+       struct adapter *adap = netdev2adap(dev);
+
+       if (state == ETHTOOL_ID_ACTIVE)
+               val = 0xffff;
+       else if (state == ETHTOOL_ID_INACTIVE)
+               val = 0;
+       else
+               return -EINVAL;
+
+       return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+}
+
+static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
+{
+       unsigned int v = 0;
+
+       if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+           type == FW_PORT_TYPE_BT_XAUI) {
+               v |= SUPPORTED_TP;
+               if (caps & FW_PORT_CAP_SPEED_100M)
+                       v |= SUPPORTED_100baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+               v |= SUPPORTED_Backplane;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseKX_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseKX4_Full;
+       } else if (type == FW_PORT_TYPE_KR) {
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+       } else if (type == FW_PORT_TYPE_BP_AP) {
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+                    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+       } else if (type == FW_PORT_TYPE_BP4_AP) {
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+                    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+                    SUPPORTED_10000baseKX4_Full;
+       } else if (type == FW_PORT_TYPE_FIBER_XFI ||
+                  type == FW_PORT_TYPE_FIBER_XAUI ||
+                  type == FW_PORT_TYPE_SFP ||
+                  type == FW_PORT_TYPE_QSFP_10G ||
+                  type == FW_PORT_TYPE_QSA) {
+               v |= SUPPORTED_FIBRE;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_BP40_BA ||
+                  type == FW_PORT_TYPE_QSFP) {
+               v |= SUPPORTED_40000baseSR4_Full;
+               v |= SUPPORTED_FIBRE;
+       }
+
+       if (caps & FW_PORT_CAP_ANEG)
+               v |= SUPPORTED_Autoneg;
+       return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+       unsigned int v = 0;
+
+       if (caps & ADVERTISED_100baseT_Full)
+               v |= FW_PORT_CAP_SPEED_100M;
+       if (caps & ADVERTISED_1000baseT_Full)
+               v |= FW_PORT_CAP_SPEED_1G;
+       if (caps & ADVERTISED_10000baseT_Full)
+               v |= FW_PORT_CAP_SPEED_10G;
+       if (caps & ADVERTISED_40000baseSR4_Full)
+               v |= FW_PORT_CAP_SPEED_40G;
+       return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       const struct port_info *p = netdev_priv(dev);
+
+       if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+           p->port_type == FW_PORT_TYPE_BT_XFI ||
+           p->port_type == FW_PORT_TYPE_BT_XAUI) {
+               cmd->port = PORT_TP;
+       } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+                  p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
+               cmd->port = PORT_FIBRE;
+       } else if (p->port_type == FW_PORT_TYPE_SFP ||
+                  p->port_type == FW_PORT_TYPE_QSFP_10G ||
+                  p->port_type == FW_PORT_TYPE_QSA ||
+                  p->port_type == FW_PORT_TYPE_QSFP) {
+               if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_SR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_ER ||
+                   p->mod_type == FW_PORT_MOD_TYPE_LRM)
+                       cmd->port = PORT_FIBRE;
+               else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+                        p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+                       cmd->port = PORT_DA;
+               else
+                       cmd->port = PORT_OTHER;
+       } else {
+               cmd->port = PORT_OTHER;
+       }
+
+       if (p->mdio_addr >= 0) {
+               cmd->phy_address = p->mdio_addr;
+               cmd->transceiver = XCVR_EXTERNAL;
+               cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+                       MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+       } else {
+               cmd->phy_address = 0;  /* not really, but no better option */
+               cmd->transceiver = XCVR_INTERNAL;
+               cmd->mdio_support = 0;
+       }
+
+       cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+       cmd->advertising = from_fw_linkcaps(p->port_type,
+                                           p->link_cfg.advertising);
+       ethtool_cmd_speed_set(cmd,
+                             netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
+       cmd->duplex = DUPLEX_FULL;
+       cmd->autoneg = p->link_cfg.autoneg;
+       cmd->maxtxpkt = 0;
+       cmd->maxrxpkt = 0;
+       return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+       if (speed == 100)
+               return FW_PORT_CAP_SPEED_100M;
+       if (speed == 1000)
+               return FW_PORT_CAP_SPEED_1G;
+       if (speed == 10000)
+               return FW_PORT_CAP_SPEED_10G;
+       if (speed == 40000)
+               return FW_PORT_CAP_SPEED_40G;
+       return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       unsigned int cap;
+       struct port_info *p = netdev_priv(dev);
+       struct link_config *lc = &p->link_cfg;
+       u32 speed = ethtool_cmd_speed(cmd);
+
+       if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
+               return -EINVAL;
+
+       if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+               /* PHY offers a single speed.  See if that's what's
+                * being requested.
+                */
+               if (cmd->autoneg == AUTONEG_DISABLE &&
+                   (lc->supported & speed_to_caps(speed)))
+                       return 0;
+               return -EINVAL;
+       }
+
+       if (cmd->autoneg == AUTONEG_DISABLE) {
+               cap = speed_to_caps(speed);
+
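+               /* Forcing the speed is only honoured for supported rates below
+                * 1 Gb/s (in practice 100 Mb/s); 1/10/40 Gb/s links must use
+                * autonegotiation.
+                */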
+               if (!(lc->supported & cap) ||
+                   (speed == 1000) ||
+                   (speed == 10000) ||
+                   (speed == 40000))
+                       return -EINVAL;
+               lc->requested_speed = cap;
+               lc->advertising = 0;
+       } else {
+               cap = to_fw_linkcaps(cmd->advertising);
+               if (!(lc->supported & cap))
+                       return -EINVAL;
+               lc->requested_speed = 0;
+               lc->advertising = cap | FW_PORT_CAP_ANEG;
+       }
+       lc->autoneg = cmd->autoneg;
+
+       if (netif_running(dev))
+               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+                                    lc);
+       return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+                          struct ethtool_pauseparam *epause)
+{
+       struct port_info *p = netdev_priv(dev);
+
+       epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+       epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+       epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+                         struct ethtool_pauseparam *epause)
+{
+       struct port_info *p = netdev_priv(dev);
+       struct link_config *lc = &p->link_cfg;
+
+       if (epause->autoneg == AUTONEG_DISABLE)
+               lc->requested_fc = 0;
+       else if (lc->supported & FW_PORT_CAP_ANEG)
+               lc->requested_fc = PAUSE_AUTONEG;
+       else
+               return -EINVAL;
+
+       if (epause->rx_pause)
+               lc->requested_fc |= PAUSE_RX;
+       if (epause->tx_pause)
+               lc->requested_fc |= PAUSE_TX;
+       if (netif_running(dev))
+               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+                                    lc);
+       return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+       const struct port_info *pi = netdev_priv(dev);
+       const struct sge *s = &pi->adapter->sge;
+
+       e->rx_max_pending = MAX_RX_BUFFERS;
+       e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+       e->rx_jumbo_max_pending = 0;
+       e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+       e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+       e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+       e->rx_jumbo_pending = 0;
+       e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+       int i;
+       const struct port_info *pi = netdev_priv(dev);
+       struct adapter *adapter = pi->adapter;
+       struct sge *s = &adapter->sge;
+
+       if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+           e->tx_pending > MAX_TXQ_ENTRIES ||
+           e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+           e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+           e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+               return -EINVAL;
+
+       if (adapter->flags & FULL_INIT_DONE)
+               return -EBUSY;
+
+       for (i = 0; i < pi->nqsets; ++i) {
+               s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+               s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+               s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+       }
+       return 0;
+}
+
+/**
+ * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+                             unsigned int us, unsigned int cnt)
+{
+       int i, err;
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+       for (i = 0; i < pi->nqsets; i++, q++) {
+               err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
+{
+       int i;
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+       for (i = 0; i < pi->nqsets; i++, q++)
+               q->rspq.adaptive_rx = adaptive_rx;
+
+       return 0;
+}
+
+static int get_adaptive_rx_setting(struct net_device *dev)
+{
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+       return q->rspq.adaptive_rx;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+       set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
+       return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+                                 c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+       const struct port_info *pi = netdev_priv(dev);
+       const struct adapter *adap = pi->adapter;
+       const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+       c->rx_coalesce_usecs = qtimer_val(adap, rq);
+       c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+               adap->sge.counter_val[rq->pktcnt_idx] : 0;
+       c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
+       return 0;
+}
+
+/**
+ *     eeprom_ptov - translate a physical EEPROM address to virtual
+ *     @phys_addr: the physical EEPROM address
+ *     @fn: the PCI function number
+ *     @sz: size of function-specific area
+ *
+ *     Translate a physical EEPROM address to virtual.  The first 1K is
+ *     accessed through virtual addresses starting at 31K, the rest is
+ *     accessed through virtual addresses starting at 0.
+ *
+ *     The mapping is as follows:
+ *     [0..1K) -> [31K..32K)
+ *     [1K..1K+A) -> [31K-A..31K)
+ *     [1K+A..ES) -> [0..ES-A-1K)
+ *
+ *     where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+       fn *= sz;
+       if (phys_addr < 1024)
+               return phys_addr + (31 << 10);
+       if (phys_addr < 1024 + fn)
+               return 31744 - fn + phys_addr - 1024;
+       if (phys_addr < EEPROMSIZE)
+               return phys_addr - 1024 - fn;
+       return -EINVAL;
+}
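
To sanity-check the mapping documented above, here is a small standalone sketch of the same translation; the 1 KB per-function area and 32 KB EEPROM size used in the asserts are illustrative assumptions, not the driver's real EEPROMPFSIZE/EEPROMSIZE values.

	#include <assert.h>

	static int ptov(unsigned int phys, unsigned int fn, unsigned int sz,
			unsigned int eeprom_size)
	{
		fn *= sz;
		if (phys < 1024)
			return phys + (31 << 10);	   /* [0..1K)    -> [31K..32K)   */
		if (phys < 1024 + fn)
			return 31744 - fn + phys - 1024;   /* [1K..1K+A) -> [31K-A..31K) */
		if (phys < eeprom_size)
			return phys - 1024 - fn;	   /* [1K+A..ES) -> [0..ES-A-1K) */
		return -1;
	}

	int main(void)
	{
		/* function 2 with a 1 KB per-function area => A = 2 KB */
		assert(ptov(0,    2, 1024, 32768) == 31744);  /* 31K              */
		assert(ptov(1024, 2, 1024, 32768) == 29696);  /* 31K - A          */
		assert(ptov(3072, 2, 1024, 32768) == 0);      /* start of the rest */
		return 0;
	}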
+
+/* The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+       int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+       if (vaddr >= 0)
+               vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+       return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+       int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+       if (vaddr >= 0)
+               vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+       return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+                     u8 *data)
+{
+       int i, err = 0;
+       struct adapter *adapter = netdev2adap(dev);
+       u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+
+       if (!buf)
+               return -ENOMEM;
+
+       e->magic = EEPROM_MAGIC;
+       for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+               err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+       if (!err)
+               memcpy(data, buf + e->offset, e->len);
+       kfree(buf);
+       return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+                     u8 *data)
+{
+       u8 *buf;
+       int err = 0;
+       u32 aligned_offset, aligned_len, *p;
+       struct adapter *adapter = netdev2adap(dev);
+
+       if (eeprom->magic != EEPROM_MAGIC)
+               return -EINVAL;
+
+       aligned_offset = eeprom->offset & ~3;
+       aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+       if (adapter->fn > 0) {
+               u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+               if (aligned_offset < start ||
+                   aligned_offset + aligned_len > start + EEPROMPFSIZE)
+                       return -EPERM;
+       }
+
+       if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+               /* RMW possibly needed for first or last words.
+                */
+               buf = kmalloc(aligned_len, GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+               err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+               if (!err && aligned_len > 4)
+                       err = eeprom_rd_phys(adapter,
+                                            aligned_offset + aligned_len - 4,
+                                            (u32 *)&buf[aligned_len - 4]);
+               if (err)
+                       goto out;
+               memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+       } else {
+               buf = data;
+       }
+
+       err = t4_seeprom_wp(adapter, false);
+       if (err)
+               goto out;
+
+       for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+               err = eeprom_wr_phys(adapter, aligned_offset, *p);
+               aligned_offset += 4;
+       }
+
+       if (!err)
+               err = t4_seeprom_wp(adapter, true);
+out:
+       if (buf != data)
+               kfree(buf);
+       return err;
+}
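
The word-alignment arithmetic at the top of set_eeprom() is easiest to see with concrete numbers; this small sketch (values chosen purely for illustration) shows the read-modify-write case:

	#include <stdio.h>

	int main(void)
	{
		unsigned int offset = 5, len = 6;	/* caller wants bytes 5..10 */
		unsigned int aligned_offset = offset & ~3;		  /* 4 */
		unsigned int aligned_len = (len + (offset & 3) + 3) & ~3; /* 8 */

		/* The aligned window [4, 12) spans two 32-bit words; bytes 4 and
		 * 11 fall outside the requested range, so the first and last
		 * words are read back before being rewritten (the RMW branch).
		 */
		printf("aligned window: [%u, %u)\n",
		       aligned_offset, aligned_offset + aligned_len);
		return 0;
	}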
+
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+       int ret;
+       const struct firmware *fw;
+       struct adapter *adap = netdev2adap(netdev);
+       unsigned int mbox = PCIE_FW_MASTER_M + 1;
+
+       ef->data[sizeof(ef->data) - 1] = '\0';
+       ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+       if (ret < 0)
+               return ret;
+
+       /* If the adapter has been fully initialized then we'll go ahead and
+        * try to get the firmware's cooperation in upgrading to the new
+        * firmware image; otherwise we'll try to do the entire job from the
+        * host ... and we always "force" the operation in this path.
+        */
+       if (adap->flags & FULL_INIT_DONE)
+               mbox = adap->mbox;
+
+       ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
+       release_firmware(fw);
+       if (!ret)
+               dev_info(adap->pdev_dev,
+                        "loaded firmware %s, reload cxgb4 driver\n", ef->data);
+       return ret;
+}
+
+#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
+#define BCAST_CRC 0xa0ccc1a6
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       wol->supported = WAKE_BCAST | WAKE_MAGIC;
+       wol->wolopts = netdev2adap(dev)->wol;
+       memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       int err = 0;
+       struct port_info *pi = netdev_priv(dev);
+
+       if (wol->wolopts & ~WOL_SUPPORTED)
+               return -EINVAL;
+       t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+                           (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+       if (wol->wolopts & WAKE_BCAST) {
+               err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+                                       ~0ULL, 0, false);
+               if (!err)
+                       err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+                                               ~6ULL, ~0ULL, BCAST_CRC, true);
+       } else {
+               t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+       }
+       return err;
+}
+
+static u32 get_rss_table_size(struct net_device *dev)
+{
+       const struct port_info *pi = netdev_priv(dev);
+
+       return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
+{
+       const struct port_info *pi = netdev_priv(dev);
+       unsigned int n = pi->rss_size;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+       if (!p)
+               return 0;
+       while (n--)
+               p[n] = pi->rss[n];
+       return 0;
+}
+
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
+                        const u8 hfunc)
+{
+       unsigned int i;
+       struct port_info *pi = netdev_priv(dev);
+
+       /* Only the RSS indirection table can be changed here: reject any
+        * attempt to set a hash key or to select a hash function other than
+        * Toeplitz (ETH_RSS_HASH_TOP).
+        */
+       if (key ||
+           (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+               return -EOPNOTSUPP;
+       if (!p)
+               return 0;
+
+       for (i = 0; i < pi->rss_size; i++)
+               pi->rss[i] = p[i];
+       if (pi->adapter->flags & FULL_INIT_DONE)
+               return cxgb4_write_rss(pi, pi->rss);
+       return 0;
+}
+
+static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                    u32 *rules)
+{
+       const struct port_info *pi = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXFH: {
+               unsigned int v = pi->rss_mode;
+
+               info->data = 0;
+               switch (info->flow_type) {
+               case TCP_V4_FLOW:
+                       if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST |
+                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
+                       else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               case UDP_V4_FLOW:
+                       if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+                           (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+                               info->data = RXH_IP_SRC | RXH_IP_DST |
+                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
+                       else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               case SCTP_V4_FLOW:
+               case AH_ESP_V4_FLOW:
+               case IPV4_FLOW:
+                       if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               case TCP_V6_FLOW:
+                       if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST |
+                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
+                       else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               case UDP_V6_FLOW:
+                       if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+                           (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+                               info->data = RXH_IP_SRC | RXH_IP_DST |
+                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
+                       else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               case SCTP_V6_FLOW:
+               case AH_ESP_V6_FLOW:
+               case IPV6_FLOW:
+                       if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+                               info->data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               }
+               return 0;
+       }
+       case ETHTOOL_GRXRINGS:
+               info->data = pi->nqsets;
+               return 0;
+       }
+       return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+       .get_settings      = get_settings,
+       .set_settings      = set_settings,
+       .get_drvinfo       = get_drvinfo,
+       .get_msglevel      = get_msglevel,
+       .set_msglevel      = set_msglevel,
+       .get_ringparam     = get_sge_param,
+       .set_ringparam     = set_sge_param,
+       .get_coalesce      = get_coalesce,
+       .set_coalesce      = set_coalesce,
+       .get_eeprom_len    = get_eeprom_len,
+       .get_eeprom        = get_eeprom,
+       .set_eeprom        = set_eeprom,
+       .get_pauseparam    = get_pauseparam,
+       .set_pauseparam    = set_pauseparam,
+       .get_link          = ethtool_op_get_link,
+       .get_strings       = get_strings,
+       .set_phys_id       = identify_port,
+       .nway_reset        = restart_autoneg,
+       .get_sset_count    = get_sset_count,
+       .get_ethtool_stats = get_stats,
+       .get_regs_len      = get_regs_len,
+       .get_regs          = get_regs,
+       .get_wol           = get_wol,
+       .set_wol           = set_wol,
+       .get_rxnfc         = get_rxnfc,
+       .get_rxfh_indir_size = get_rss_table_size,
+       .get_rxfh          = get_rss_table,
+       .set_rxfh          = set_rss_table,
+       .flash_device      = set_flash,
+};
+
+void cxgb4_set_ethtool_ops(struct net_device *netdev)
+{
+       netdev->ethtool_ops = &cxgb_ethtool_ops;
+}
index e40e283ff36c7f2ea9c2c4334d3a287948fc775a..24e10ea3d5efa64c5280348acd2eefa0f04205cc 100644 (file)
 #include "clip_tbl.h"
 #include "l2t.h"
 
+char cxgb4_driver_name[] = KBUILD_MODNAME;
+
 #ifdef DRV_VERSION
 #undef DRV_VERSION
 #endif
 #define DRV_VERSION "2.0.0-ko"
+const char cxgb4_driver_version[] = DRV_VERSION;
 #define DRV_DESC "Chelsio T4/T5 Network Driver"
 
-enum {
-       MAX_TXQ_ENTRIES      = 16384,
-       MAX_CTRL_TXQ_ENTRIES = 1024,
-       MAX_RSPQ_ENTRIES     = 16384,
-       MAX_RX_BUFFERS       = 16384,
-       MIN_TXQ_ENTRIES      = 32,
-       MIN_CTRL_TXQ_ENTRIES = 32,
-       MIN_RSPQ_ENTRIES     = 128,
-       MIN_FL_ENTRIES       = 16
-};
-
 /* Host shadow copy of ingress filter entry.  This is in host native format
  * and doesn't match the ordering or bit order, etc. of the hardware of the
  * firmware command.  The use of bit-field structure elements is purely to
@@ -857,14 +849,14 @@ static void free_msix_queue_irqs(struct adapter *adap)
 }
 
 /**
- *     write_rss - write the RSS table for a given port
+ *     cxgb4_write_rss - write the RSS table for a given port
  *     @pi: the port
  *     @queues: array of queue indices for RSS
  *
  *     Sets up the portion of the HW RSS table for the port's VI to distribute
  *     packets to the Rx queues in @queues.
  */
-static int write_rss(const struct port_info *pi, const u16 *queues)
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
 {
        u16 *rss;
        int i, err;
@@ -897,7 +889,7 @@ static int setup_rss(struct adapter *adap)
        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);
 
-               err = write_rss(pi, pi->rss);
+               err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
@@ -920,7 +912,7 @@ static void quiesce_rx(struct adapter *adap)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+       for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
                if (q && q->handler) {
@@ -934,6 +926,21 @@ static void quiesce_rx(struct adapter *adap)
        }
 }
 
+/* Disable interrupts and NAPI processing */
+static void disable_interrupts(struct adapter *adap)
+{
+       if (adap->flags & FULL_INIT_DONE) {
+               t4_intr_disable(adap);
+               if (adap->flags & USING_MSIX) {
+                       free_msix_queue_irqs(adap);
+                       free_irq(adap->msix_info[0].vec, adap);
+               } else {
+                       free_irq(adap->pdev->irq, adap);
+               }
+               quiesce_rx(adap);
+       }
+}
+
 /*
  * Enable NAPI scheduling and interrupt generation for all Rx queues.
  */
@@ -941,7 +948,7 @@ static void enable_rx(struct adapter *adap)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+       for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
                if (!q)
@@ -992,8 +999,8 @@ static int setup_sge_queues(struct adapter *adap)
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;
 
-       bitmap_zero(s->starving_fl, MAX_EGRQ);
-       bitmap_zero(s->txq_maperr, MAX_EGRQ);
+       bitmap_zero(s->starving_fl, s->egr_sz);
+       bitmap_zero(s->txq_maperr, s->egr_sz);
 
        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
@@ -1005,6 +1012,19 @@ static int setup_sge_queues(struct adapter *adap)
                msi_idx = -((int)s->intrq.abs_id + 1);
        }
 
+       /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+        * don't forget to update the following which need to be
+        * kept in sync with any changes here.
+        *
+        * 1. The calculations of MAX_INGQ in cxgb4.h.
+        *
+        * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+        *    to accommodate any new/deleted Ingress Queues
+        *    which need MSI-X Vectors.
+        *
+        * 3. Update sge_qinfo_show() to include information on the
+        *    new/deleted queues.
+        */
        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
@@ -1299,1192 +1319,6 @@ static inline int is_offload(const struct adapter *adap)
        return adap->params.offload;
 }
 
-/*
- * Implementation of ethtool operations.
- */
-
-static u32 get_msglevel(struct net_device *dev)
-{
-       return netdev2adap(dev)->msg_enable;
-}
-
-static void set_msglevel(struct net_device *dev, u32 val)
-{
-       netdev2adap(dev)->msg_enable = val;
-}
-
-static char stats_strings[][ETH_GSTRING_LEN] = {
-       "TxOctetsOK         ",
-       "TxFramesOK         ",
-       "TxBroadcastFrames  ",
-       "TxMulticastFrames  ",
-       "TxUnicastFrames    ",
-       "TxErrorFrames      ",
-
-       "TxFrames64         ",
-       "TxFrames65To127    ",
-       "TxFrames128To255   ",
-       "TxFrames256To511   ",
-       "TxFrames512To1023  ",
-       "TxFrames1024To1518 ",
-       "TxFrames1519ToMax  ",
-
-       "TxFramesDropped    ",
-       "TxPauseFrames      ",
-       "TxPPP0Frames       ",
-       "TxPPP1Frames       ",
-       "TxPPP2Frames       ",
-       "TxPPP3Frames       ",
-       "TxPPP4Frames       ",
-       "TxPPP5Frames       ",
-       "TxPPP6Frames       ",
-       "TxPPP7Frames       ",
-
-       "RxOctetsOK         ",
-       "RxFramesOK         ",
-       "RxBroadcastFrames  ",
-       "RxMulticastFrames  ",
-       "RxUnicastFrames    ",
-
-       "RxFramesTooLong    ",
-       "RxJabberErrors     ",
-       "RxFCSErrors        ",
-       "RxLengthErrors     ",
-       "RxSymbolErrors     ",
-       "RxRuntFrames       ",
-
-       "RxFrames64         ",
-       "RxFrames65To127    ",
-       "RxFrames128To255   ",
-       "RxFrames256To511   ",
-       "RxFrames512To1023  ",
-       "RxFrames1024To1518 ",
-       "RxFrames1519ToMax  ",
-
-       "RxPauseFrames      ",
-       "RxPPP0Frames       ",
-       "RxPPP1Frames       ",
-       "RxPPP2Frames       ",
-       "RxPPP3Frames       ",
-       "RxPPP4Frames       ",
-       "RxPPP5Frames       ",
-       "RxPPP6Frames       ",
-       "RxPPP7Frames       ",
-
-       "RxBG0FramesDropped ",
-       "RxBG1FramesDropped ",
-       "RxBG2FramesDropped ",
-       "RxBG3FramesDropped ",
-       "RxBG0FramesTrunc   ",
-       "RxBG1FramesTrunc   ",
-       "RxBG2FramesTrunc   ",
-       "RxBG3FramesTrunc   ",
-
-       "TSO                ",
-       "TxCsumOffload      ",
-       "RxCsumGood         ",
-       "VLANextractions    ",
-       "VLANinsertions     ",
-       "GROpackets         ",
-       "GROmerged          ",
-       "WriteCoalSuccess   ",
-       "WriteCoalFail      ",
-};
-
-static int get_sset_count(struct net_device *dev, int sset)
-{
-       switch (sset) {
-       case ETH_SS_STATS:
-               return ARRAY_SIZE(stats_strings);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-#define T4_REGMAP_SIZE (160 * 1024)
-#define T5_REGMAP_SIZE (332 * 1024)
-
-static int get_regs_len(struct net_device *dev)
-{
-       struct adapter *adap = netdev2adap(dev);
-       if (is_t4(adap->params.chip))
-               return T4_REGMAP_SIZE;
-       else
-               return T5_REGMAP_SIZE;
-}
-
-static int get_eeprom_len(struct net_device *dev)
-{
-       return EEPROMSIZE;
-}
-
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-       struct adapter *adapter = netdev2adap(dev);
-       u32 exprom_vers;
-
-       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
-       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-       strlcpy(info->bus_info, pci_name(adapter->pdev),
-               sizeof(info->bus_info));
-
-       if (adapter->params.fw_vers)
-               snprintf(info->fw_version, sizeof(info->fw_version),
-                       "%u.%u.%u.%u, TP %u.%u.%u.%u",
-                       FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
-                       FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
-                       FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
-                       FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
-                       FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
-                       FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
-                       FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
-                       FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
-
-       if (!t4_get_exprom_version(adapter, &exprom_vers))
-               snprintf(info->erom_version, sizeof(info->erom_version),
-                        "%u.%u.%u.%u",
-                        FW_HDR_FW_VER_MAJOR_G(exprom_vers),
-                        FW_HDR_FW_VER_MINOR_G(exprom_vers),
-                        FW_HDR_FW_VER_MICRO_G(exprom_vers),
-                        FW_HDR_FW_VER_BUILD_G(exprom_vers));
-}
-
-static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-       if (stringset == ETH_SS_STATS)
-               memcpy(data, stats_strings, sizeof(stats_strings));
-}
-
-/*
- * port stats maintained per queue of the port.  They should be in the same
- * order as in stats_strings above.
- */
-struct queue_port_stats {
-       u64 tso;
-       u64 tx_csum;
-       u64 rx_csum;
-       u64 vlan_ex;
-       u64 vlan_ins;
-       u64 gro_pkts;
-       u64 gro_merged;
-};
-
-static void collect_sge_port_stats(const struct adapter *adap,
-               const struct port_info *p, struct queue_port_stats *s)
-{
-       int i;
-       const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
-       const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
-
-       memset(s, 0, sizeof(*s));
-       for (i = 0; i < p->nqsets; i++, rx++, tx++) {
-               s->tso += tx->tso;
-               s->tx_csum += tx->tx_cso;
-               s->rx_csum += rx->stats.rx_cso;
-               s->vlan_ex += rx->stats.vlan_ex;
-               s->vlan_ins += tx->vlan_ins;
-               s->gro_pkts += rx->stats.lro_pkts;
-               s->gro_merged += rx->stats.lro_merged;
-       }
-}
-
-static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
-                     u64 *data)
-{
-       struct port_info *pi = netdev_priv(dev);
-       struct adapter *adapter = pi->adapter;
-       u32 val1, val2;
-
-       t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
-
-       data += sizeof(struct port_stats) / sizeof(u64);
-       collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
-       data += sizeof(struct queue_port_stats) / sizeof(u64);
-       if (!is_t4(adapter->params.chip)) {
-               t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
-               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
-               val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
-               *data = val1 - val2;
-               data++;
-               *data = val2;
-               data++;
-       } else {
-               memset(data, 0, 2 * sizeof(u64));
-               *data += 2;
-       }
-}
-
-/*
- * Return a version number to identify the type of adapter.  The scheme is:
- * - bits 0..9: chip version
- * - bits 10..15: chip revision
- * - bits 16..23: register dump version
- */
-static inline unsigned int mk_adap_vers(const struct adapter *ap)
-{
-       return CHELSIO_CHIP_VERSION(ap->params.chip) |
-               (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
-}
-
-static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
-                          unsigned int end)
-{
-       u32 *p = buf + start;
-
-       for ( ; start <= end; start += sizeof(u32))
-               *p++ = t4_read_reg(ap, start);
-}
-
-static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
-                    void *buf)
-{
-       static const unsigned int t4_reg_ranges[] = {
-               0x1008, 0x1108,
-               0x1180, 0x11b4,
-               0x11fc, 0x123c,
-               0x1300, 0x173c,
-               0x1800, 0x18fc,
-               0x3000, 0x30d8,
-               0x30e0, 0x5924,
-               0x5960, 0x59d4,
-               0x5a00, 0x5af8,
-               0x6000, 0x6098,
-               0x6100, 0x6150,
-               0x6200, 0x6208,
-               0x6240, 0x6248,
-               0x6280, 0x6338,
-               0x6370, 0x638c,
-               0x6400, 0x643c,
-               0x6500, 0x6524,
-               0x6a00, 0x6a38,
-               0x6a60, 0x6a78,
-               0x6b00, 0x6b84,
-               0x6bf0, 0x6c84,
-               0x6cf0, 0x6d84,
-               0x6df0, 0x6e84,
-               0x6ef0, 0x6f84,
-               0x6ff0, 0x7084,
-               0x70f0, 0x7184,
-               0x71f0, 0x7284,
-               0x72f0, 0x7384,
-               0x73f0, 0x7450,
-               0x7500, 0x7530,
-               0x7600, 0x761c,
-               0x7680, 0x76cc,
-               0x7700, 0x7798,
-               0x77c0, 0x77fc,
-               0x7900, 0x79fc,
-               0x7b00, 0x7c38,
-               0x7d00, 0x7efc,
-               0x8dc0, 0x8e1c,
-               0x8e30, 0x8e78,
-               0x8ea0, 0x8f6c,
-               0x8fc0, 0x9074,
-               0x90fc, 0x90fc,
-               0x9400, 0x9458,
-               0x9600, 0x96bc,
-               0x9800, 0x9808,
-               0x9820, 0x983c,
-               0x9850, 0x9864,
-               0x9c00, 0x9c6c,
-               0x9c80, 0x9cec,
-               0x9d00, 0x9d6c,
-               0x9d80, 0x9dec,
-               0x9e00, 0x9e6c,
-               0x9e80, 0x9eec,
-               0x9f00, 0x9f6c,
-               0x9f80, 0x9fec,
-               0xd004, 0xd03c,
-               0xdfc0, 0xdfe0,
-               0xe000, 0xea7c,
-               0xf000, 0x11110,
-               0x11118, 0x11190,
-               0x19040, 0x1906c,
-               0x19078, 0x19080,
-               0x1908c, 0x19124,
-               0x19150, 0x191b0,
-               0x191d0, 0x191e8,
-               0x19238, 0x1924c,
-               0x193f8, 0x19474,
-               0x19490, 0x194f8,
-               0x19800, 0x19f30,
-               0x1a000, 0x1a06c,
-               0x1a0b0, 0x1a120,
-               0x1a128, 0x1a138,
-               0x1a190, 0x1a1c4,
-               0x1a1fc, 0x1a1fc,
-               0x1e040, 0x1e04c,
-               0x1e284, 0x1e28c,
-               0x1e2c0, 0x1e2c0,
-               0x1e2e0, 0x1e2e0,
-               0x1e300, 0x1e384,
-               0x1e3c0, 0x1e3c8,
-               0x1e440, 0x1e44c,
-               0x1e684, 0x1e68c,
-               0x1e6c0, 0x1e6c0,
-               0x1e6e0, 0x1e6e0,
-               0x1e700, 0x1e784,
-               0x1e7c0, 0x1e7c8,
-               0x1e840, 0x1e84c,
-               0x1ea84, 0x1ea8c,
-               0x1eac0, 0x1eac0,
-               0x1eae0, 0x1eae0,
-               0x1eb00, 0x1eb84,
-               0x1ebc0, 0x1ebc8,
-               0x1ec40, 0x1ec4c,
-               0x1ee84, 0x1ee8c,
-               0x1eec0, 0x1eec0,
-               0x1eee0, 0x1eee0,
-               0x1ef00, 0x1ef84,
-               0x1efc0, 0x1efc8,
-               0x1f040, 0x1f04c,
-               0x1f284, 0x1f28c,
-               0x1f2c0, 0x1f2c0,
-               0x1f2e0, 0x1f2e0,
-               0x1f300, 0x1f384,
-               0x1f3c0, 0x1f3c8,
-               0x1f440, 0x1f44c,
-               0x1f684, 0x1f68c,
-               0x1f6c0, 0x1f6c0,
-               0x1f6e0, 0x1f6e0,
-               0x1f700, 0x1f784,
-               0x1f7c0, 0x1f7c8,
-               0x1f840, 0x1f84c,
-               0x1fa84, 0x1fa8c,
-               0x1fac0, 0x1fac0,
-               0x1fae0, 0x1fae0,
-               0x1fb00, 0x1fb84,
-               0x1fbc0, 0x1fbc8,
-               0x1fc40, 0x1fc4c,
-               0x1fe84, 0x1fe8c,
-               0x1fec0, 0x1fec0,
-               0x1fee0, 0x1fee0,
-               0x1ff00, 0x1ff84,
-               0x1ffc0, 0x1ffc8,
-               0x20000, 0x2002c,
-               0x20100, 0x2013c,
-               0x20190, 0x201c8,
-               0x20200, 0x20318,
-               0x20400, 0x20528,
-               0x20540, 0x20614,
-               0x21000, 0x21040,
-               0x2104c, 0x21060,
-               0x210c0, 0x210ec,
-               0x21200, 0x21268,
-               0x21270, 0x21284,
-               0x212fc, 0x21388,
-               0x21400, 0x21404,
-               0x21500, 0x21518,
-               0x2152c, 0x2153c,
-               0x21550, 0x21554,
-               0x21600, 0x21600,
-               0x21608, 0x21628,
-               0x21630, 0x2163c,
-               0x21700, 0x2171c,
-               0x21780, 0x2178c,
-               0x21800, 0x21c38,
-               0x21c80, 0x21d7c,
-               0x21e00, 0x21e04,
-               0x22000, 0x2202c,
-               0x22100, 0x2213c,
-               0x22190, 0x221c8,
-               0x22200, 0x22318,
-               0x22400, 0x22528,
-               0x22540, 0x22614,
-               0x23000, 0x23040,
-               0x2304c, 0x23060,
-               0x230c0, 0x230ec,
-               0x23200, 0x23268,
-               0x23270, 0x23284,
-               0x232fc, 0x23388,
-               0x23400, 0x23404,
-               0x23500, 0x23518,
-               0x2352c, 0x2353c,
-               0x23550, 0x23554,
-               0x23600, 0x23600,
-               0x23608, 0x23628,
-               0x23630, 0x2363c,
-               0x23700, 0x2371c,
-               0x23780, 0x2378c,
-               0x23800, 0x23c38,
-               0x23c80, 0x23d7c,
-               0x23e00, 0x23e04,
-               0x24000, 0x2402c,
-               0x24100, 0x2413c,
-               0x24190, 0x241c8,
-               0x24200, 0x24318,
-               0x24400, 0x24528,
-               0x24540, 0x24614,
-               0x25000, 0x25040,
-               0x2504c, 0x25060,
-               0x250c0, 0x250ec,
-               0x25200, 0x25268,
-               0x25270, 0x25284,
-               0x252fc, 0x25388,
-               0x25400, 0x25404,
-               0x25500, 0x25518,
-               0x2552c, 0x2553c,
-               0x25550, 0x25554,
-               0x25600, 0x25600,
-               0x25608, 0x25628,
-               0x25630, 0x2563c,
-               0x25700, 0x2571c,
-               0x25780, 0x2578c,
-               0x25800, 0x25c38,
-               0x25c80, 0x25d7c,
-               0x25e00, 0x25e04,
-               0x26000, 0x2602c,
-               0x26100, 0x2613c,
-               0x26190, 0x261c8,
-               0x26200, 0x26318,
-               0x26400, 0x26528,
-               0x26540, 0x26614,
-               0x27000, 0x27040,
-               0x2704c, 0x27060,
-               0x270c0, 0x270ec,
-               0x27200, 0x27268,
-               0x27270, 0x27284,
-               0x272fc, 0x27388,
-               0x27400, 0x27404,
-               0x27500, 0x27518,
-               0x2752c, 0x2753c,
-               0x27550, 0x27554,
-               0x27600, 0x27600,
-               0x27608, 0x27628,
-               0x27630, 0x2763c,
-               0x27700, 0x2771c,
-               0x27780, 0x2778c,
-               0x27800, 0x27c38,
-               0x27c80, 0x27d7c,
-               0x27e00, 0x27e04
-       };
-
-       static const unsigned int t5_reg_ranges[] = {
-               0x1008, 0x1148,
-               0x1180, 0x11b4,
-               0x11fc, 0x123c,
-               0x1280, 0x173c,
-               0x1800, 0x18fc,
-               0x3000, 0x3028,
-               0x3060, 0x30d8,
-               0x30e0, 0x30fc,
-               0x3140, 0x357c,
-               0x35a8, 0x35cc,
-               0x35ec, 0x35ec,
-               0x3600, 0x5624,
-               0x56cc, 0x575c,
-               0x580c, 0x5814,
-               0x5890, 0x58bc,
-               0x5940, 0x59dc,
-               0x59fc, 0x5a18,
-               0x5a60, 0x5a9c,
-               0x5b9c, 0x5bfc,
-               0x6000, 0x6040,
-               0x6058, 0x614c,
-               0x7700, 0x7798,
-               0x77c0, 0x78fc,
-               0x7b00, 0x7c54,
-               0x7d00, 0x7efc,
-               0x8dc0, 0x8de0,
-               0x8df8, 0x8e84,
-               0x8ea0, 0x8f84,
-               0x8fc0, 0x90f8,
-               0x9400, 0x9470,
-               0x9600, 0x96f4,
-               0x9800, 0x9808,
-               0x9820, 0x983c,
-               0x9850, 0x9864,
-               0x9c00, 0x9c6c,
-               0x9c80, 0x9cec,
-               0x9d00, 0x9d6c,
-               0x9d80, 0x9dec,
-               0x9e00, 0x9e6c,
-               0x9e80, 0x9eec,
-               0x9f00, 0x9f6c,
-               0x9f80, 0xa020,
-               0xd004, 0xd03c,
-               0xdfc0, 0xdfe0,
-               0xe000, 0x11088,
-               0x1109c, 0x11110,
-               0x11118, 0x1117c,
-               0x11190, 0x11204,
-               0x19040, 0x1906c,
-               0x19078, 0x19080,
-               0x1908c, 0x19124,
-               0x19150, 0x191b0,
-               0x191d0, 0x191e8,
-               0x19238, 0x19290,
-               0x193f8, 0x19474,
-               0x19490, 0x194cc,
-               0x194f0, 0x194f8,
-               0x19c00, 0x19c60,
-               0x19c94, 0x19e10,
-               0x19e50, 0x19f34,
-               0x19f40, 0x19f50,
-               0x19f90, 0x19fe4,
-               0x1a000, 0x1a06c,
-               0x1a0b0, 0x1a120,
-               0x1a128, 0x1a138,
-               0x1a190, 0x1a1c4,
-               0x1a1fc, 0x1a1fc,
-               0x1e008, 0x1e00c,
-               0x1e040, 0x1e04c,
-               0x1e284, 0x1e290,
-               0x1e2c0, 0x1e2c0,
-               0x1e2e0, 0x1e2e0,
-               0x1e300, 0x1e384,
-               0x1e3c0, 0x1e3c8,
-               0x1e408, 0x1e40c,
-               0x1e440, 0x1e44c,
-               0x1e684, 0x1e690,
-               0x1e6c0, 0x1e6c0,
-               0x1e6e0, 0x1e6e0,
-               0x1e700, 0x1e784,
-               0x1e7c0, 0x1e7c8,
-               0x1e808, 0x1e80c,
-               0x1e840, 0x1e84c,
-               0x1ea84, 0x1ea90,
-               0x1eac0, 0x1eac0,
-               0x1eae0, 0x1eae0,
-               0x1eb00, 0x1eb84,
-               0x1ebc0, 0x1ebc8,
-               0x1ec08, 0x1ec0c,
-               0x1ec40, 0x1ec4c,
-               0x1ee84, 0x1ee90,
-               0x1eec0, 0x1eec0,
-               0x1eee0, 0x1eee0,
-               0x1ef00, 0x1ef84,
-               0x1efc0, 0x1efc8,
-               0x1f008, 0x1f00c,
-               0x1f040, 0x1f04c,
-               0x1f284, 0x1f290,
-               0x1f2c0, 0x1f2c0,
-               0x1f2e0, 0x1f2e0,
-               0x1f300, 0x1f384,
-               0x1f3c0, 0x1f3c8,
-               0x1f408, 0x1f40c,
-               0x1f440, 0x1f44c,
-               0x1f684, 0x1f690,
-               0x1f6c0, 0x1f6c0,
-               0x1f6e0, 0x1f6e0,
-               0x1f700, 0x1f784,
-               0x1f7c0, 0x1f7c8,
-               0x1f808, 0x1f80c,
-               0x1f840, 0x1f84c,
-               0x1fa84, 0x1fa90,
-               0x1fac0, 0x1fac0,
-               0x1fae0, 0x1fae0,
-               0x1fb00, 0x1fb84,
-               0x1fbc0, 0x1fbc8,
-               0x1fc08, 0x1fc0c,
-               0x1fc40, 0x1fc4c,
-               0x1fe84, 0x1fe90,
-               0x1fec0, 0x1fec0,
-               0x1fee0, 0x1fee0,
-               0x1ff00, 0x1ff84,
-               0x1ffc0, 0x1ffc8,
-               0x30000, 0x30030,
-               0x30100, 0x30144,
-               0x30190, 0x301d0,
-               0x30200, 0x30318,
-               0x30400, 0x3052c,
-               0x30540, 0x3061c,
-               0x30800, 0x30834,
-               0x308c0, 0x30908,
-               0x30910, 0x309ac,
-               0x30a00, 0x30a04,
-               0x30a0c, 0x30a2c,
-               0x30a44, 0x30a50,
-               0x30a74, 0x30c24,
-               0x30d08, 0x30d14,
-               0x30d1c, 0x30d20,
-               0x30d3c, 0x30d50,
-               0x31200, 0x3120c,
-               0x31220, 0x31220,
-               0x31240, 0x31240,
-               0x31600, 0x31600,
-               0x31608, 0x3160c,
-               0x31a00, 0x31a1c,
-               0x31e04, 0x31e20,
-               0x31e38, 0x31e3c,
-               0x31e80, 0x31e80,
-               0x31e88, 0x31ea8,
-               0x31eb0, 0x31eb4,
-               0x31ec8, 0x31ed4,
-               0x31fb8, 0x32004,
-               0x32208, 0x3223c,
-               0x32600, 0x32630,
-               0x32a00, 0x32abc,
-               0x32b00, 0x32b70,
-               0x33000, 0x33048,
-               0x33060, 0x3309c,
-               0x330f0, 0x33148,
-               0x33160, 0x3319c,
-               0x331f0, 0x332e4,
-               0x332f8, 0x333e4,
-               0x333f8, 0x33448,
-               0x33460, 0x3349c,
-               0x334f0, 0x33548,
-               0x33560, 0x3359c,
-               0x335f0, 0x336e4,
-               0x336f8, 0x337e4,
-               0x337f8, 0x337fc,
-               0x33814, 0x33814,
-               0x3382c, 0x3382c,
-               0x33880, 0x3388c,
-               0x338e8, 0x338ec,
-               0x33900, 0x33948,
-               0x33960, 0x3399c,
-               0x339f0, 0x33ae4,
-               0x33af8, 0x33b10,
-               0x33b28, 0x33b28,
-               0x33b3c, 0x33b50,
-               0x33bf0, 0x33c10,
-               0x33c28, 0x33c28,
-               0x33c3c, 0x33c50,
-               0x33cf0, 0x33cfc,
-               0x34000, 0x34030,
-               0x34100, 0x34144,
-               0x34190, 0x341d0,
-               0x34200, 0x34318,
-               0x34400, 0x3452c,
-               0x34540, 0x3461c,
-               0x34800, 0x34834,
-               0x348c0, 0x34908,
-               0x34910, 0x349ac,
-               0x34a00, 0x34a04,
-               0x34a0c, 0x34a2c,
-               0x34a44, 0x34a50,
-               0x34a74, 0x34c24,
-               0x34d08, 0x34d14,
-               0x34d1c, 0x34d20,
-               0x34d3c, 0x34d50,
-               0x35200, 0x3520c,
-               0x35220, 0x35220,
-               0x35240, 0x35240,
-               0x35600, 0x35600,
-               0x35608, 0x3560c,
-               0x35a00, 0x35a1c,
-               0x35e04, 0x35e20,
-               0x35e38, 0x35e3c,
-               0x35e80, 0x35e80,
-               0x35e88, 0x35ea8,
-               0x35eb0, 0x35eb4,
-               0x35ec8, 0x35ed4,
-               0x35fb8, 0x36004,
-               0x36208, 0x3623c,
-               0x36600, 0x36630,
-               0x36a00, 0x36abc,
-               0x36b00, 0x36b70,
-               0x37000, 0x37048,
-               0x37060, 0x3709c,
-               0x370f0, 0x37148,
-               0x37160, 0x3719c,
-               0x371f0, 0x372e4,
-               0x372f8, 0x373e4,
-               0x373f8, 0x37448,
-               0x37460, 0x3749c,
-               0x374f0, 0x37548,
-               0x37560, 0x3759c,
-               0x375f0, 0x376e4,
-               0x376f8, 0x377e4,
-               0x377f8, 0x377fc,
-               0x37814, 0x37814,
-               0x3782c, 0x3782c,
-               0x37880, 0x3788c,
-               0x378e8, 0x378ec,
-               0x37900, 0x37948,
-               0x37960, 0x3799c,
-               0x379f0, 0x37ae4,
-               0x37af8, 0x37b10,
-               0x37b28, 0x37b28,
-               0x37b3c, 0x37b50,
-               0x37bf0, 0x37c10,
-               0x37c28, 0x37c28,
-               0x37c3c, 0x37c50,
-               0x37cf0, 0x37cfc,
-               0x38000, 0x38030,
-               0x38100, 0x38144,
-               0x38190, 0x381d0,
-               0x38200, 0x38318,
-               0x38400, 0x3852c,
-               0x38540, 0x3861c,
-               0x38800, 0x38834,
-               0x388c0, 0x38908,
-               0x38910, 0x389ac,
-               0x38a00, 0x38a04,
-               0x38a0c, 0x38a2c,
-               0x38a44, 0x38a50,
-               0x38a74, 0x38c24,
-               0x38d08, 0x38d14,
-               0x38d1c, 0x38d20,
-               0x38d3c, 0x38d50,
-               0x39200, 0x3920c,
-               0x39220, 0x39220,
-               0x39240, 0x39240,
-               0x39600, 0x39600,
-               0x39608, 0x3960c,
-               0x39a00, 0x39a1c,
-               0x39e04, 0x39e20,
-               0x39e38, 0x39e3c,
-               0x39e80, 0x39e80,
-               0x39e88, 0x39ea8,
-               0x39eb0, 0x39eb4,
-               0x39ec8, 0x39ed4,
-               0x39fb8, 0x3a004,
-               0x3a208, 0x3a23c,
-               0x3a600, 0x3a630,
-               0x3aa00, 0x3aabc,
-               0x3ab00, 0x3ab70,
-               0x3b000, 0x3b048,
-               0x3b060, 0x3b09c,
-               0x3b0f0, 0x3b148,
-               0x3b160, 0x3b19c,
-               0x3b1f0, 0x3b2e4,
-               0x3b2f8, 0x3b3e4,
-               0x3b3f8, 0x3b448,
-               0x3b460, 0x3b49c,
-               0x3b4f0, 0x3b548,
-               0x3b560, 0x3b59c,
-               0x3b5f0, 0x3b6e4,
-               0x3b6f8, 0x3b7e4,
-               0x3b7f8, 0x3b7fc,
-               0x3b814, 0x3b814,
-               0x3b82c, 0x3b82c,
-               0x3b880, 0x3b88c,
-               0x3b8e8, 0x3b8ec,
-               0x3b900, 0x3b948,
-               0x3b960, 0x3b99c,
-               0x3b9f0, 0x3bae4,
-               0x3baf8, 0x3bb10,
-               0x3bb28, 0x3bb28,
-               0x3bb3c, 0x3bb50,
-               0x3bbf0, 0x3bc10,
-               0x3bc28, 0x3bc28,
-               0x3bc3c, 0x3bc50,
-               0x3bcf0, 0x3bcfc,
-               0x3c000, 0x3c030,
-               0x3c100, 0x3c144,
-               0x3c190, 0x3c1d0,
-               0x3c200, 0x3c318,
-               0x3c400, 0x3c52c,
-               0x3c540, 0x3c61c,
-               0x3c800, 0x3c834,
-               0x3c8c0, 0x3c908,
-               0x3c910, 0x3c9ac,
-               0x3ca00, 0x3ca04,
-               0x3ca0c, 0x3ca2c,
-               0x3ca44, 0x3ca50,
-               0x3ca74, 0x3cc24,
-               0x3cd08, 0x3cd14,
-               0x3cd1c, 0x3cd20,
-               0x3cd3c, 0x3cd50,
-               0x3d200, 0x3d20c,
-               0x3d220, 0x3d220,
-               0x3d240, 0x3d240,
-               0x3d600, 0x3d600,
-               0x3d608, 0x3d60c,
-               0x3da00, 0x3da1c,
-               0x3de04, 0x3de20,
-               0x3de38, 0x3de3c,
-               0x3de80, 0x3de80,
-               0x3de88, 0x3dea8,
-               0x3deb0, 0x3deb4,
-               0x3dec8, 0x3ded4,
-               0x3dfb8, 0x3e004,
-               0x3e208, 0x3e23c,
-               0x3e600, 0x3e630,
-               0x3ea00, 0x3eabc,
-               0x3eb00, 0x3eb70,
-               0x3f000, 0x3f048,
-               0x3f060, 0x3f09c,
-               0x3f0f0, 0x3f148,
-               0x3f160, 0x3f19c,
-               0x3f1f0, 0x3f2e4,
-               0x3f2f8, 0x3f3e4,
-               0x3f3f8, 0x3f448,
-               0x3f460, 0x3f49c,
-               0x3f4f0, 0x3f548,
-               0x3f560, 0x3f59c,
-               0x3f5f0, 0x3f6e4,
-               0x3f6f8, 0x3f7e4,
-               0x3f7f8, 0x3f7fc,
-               0x3f814, 0x3f814,
-               0x3f82c, 0x3f82c,
-               0x3f880, 0x3f88c,
-               0x3f8e8, 0x3f8ec,
-               0x3f900, 0x3f948,
-               0x3f960, 0x3f99c,
-               0x3f9f0, 0x3fae4,
-               0x3faf8, 0x3fb10,
-               0x3fb28, 0x3fb28,
-               0x3fb3c, 0x3fb50,
-               0x3fbf0, 0x3fc10,
-               0x3fc28, 0x3fc28,
-               0x3fc3c, 0x3fc50,
-               0x3fcf0, 0x3fcfc,
-               0x40000, 0x4000c,
-               0x40040, 0x40068,
-               0x40080, 0x40144,
-               0x40180, 0x4018c,
-               0x40200, 0x40298,
-               0x402ac, 0x4033c,
-               0x403f8, 0x403fc,
-               0x41304, 0x413c4,
-               0x41400, 0x4141c,
-               0x41480, 0x414d0,
-               0x44000, 0x44078,
-               0x440c0, 0x44278,
-               0x442c0, 0x44478,
-               0x444c0, 0x44678,
-               0x446c0, 0x44878,
-               0x448c0, 0x449fc,
-               0x45000, 0x45068,
-               0x45080, 0x45084,
-               0x450a0, 0x450b0,
-               0x45200, 0x45268,
-               0x45280, 0x45284,
-               0x452a0, 0x452b0,
-               0x460c0, 0x460e4,
-               0x47000, 0x4708c,
-               0x47200, 0x47250,
-               0x47400, 0x47420,
-               0x47600, 0x47618,
-               0x47800, 0x47814,
-               0x48000, 0x4800c,
-               0x48040, 0x48068,
-               0x48080, 0x48144,
-               0x48180, 0x4818c,
-               0x48200, 0x48298,
-               0x482ac, 0x4833c,
-               0x483f8, 0x483fc,
-               0x49304, 0x493c4,
-               0x49400, 0x4941c,
-               0x49480, 0x494d0,
-               0x4c000, 0x4c078,
-               0x4c0c0, 0x4c278,
-               0x4c2c0, 0x4c478,
-               0x4c4c0, 0x4c678,
-               0x4c6c0, 0x4c878,
-               0x4c8c0, 0x4c9fc,
-               0x4d000, 0x4d068,
-               0x4d080, 0x4d084,
-               0x4d0a0, 0x4d0b0,
-               0x4d200, 0x4d268,
-               0x4d280, 0x4d284,
-               0x4d2a0, 0x4d2b0,
-               0x4e0c0, 0x4e0e4,
-               0x4f000, 0x4f08c,
-               0x4f200, 0x4f250,
-               0x4f400, 0x4f420,
-               0x4f600, 0x4f618,
-               0x4f800, 0x4f814,
-               0x50000, 0x500cc,
-               0x50400, 0x50400,
-               0x50800, 0x508cc,
-               0x50c00, 0x50c00,
-               0x51000, 0x5101c,
-               0x51300, 0x51308,
-       };
-
-       int i;
-       struct adapter *ap = netdev2adap(dev);
-       static const unsigned int *reg_ranges;
-       int arr_size = 0, buf_size = 0;
-
-       if (is_t4(ap->params.chip)) {
-               reg_ranges = &t4_reg_ranges[0];
-               arr_size = ARRAY_SIZE(t4_reg_ranges);
-               buf_size = T4_REGMAP_SIZE;
-       } else {
-               reg_ranges = &t5_reg_ranges[0];
-               arr_size = ARRAY_SIZE(t5_reg_ranges);
-               buf_size = T5_REGMAP_SIZE;
-       }
-
-       regs->version = mk_adap_vers(ap);
-
-       memset(buf, 0, buf_size);
-       for (i = 0; i < arr_size; i += 2)
-               reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
-}
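For reference, the get_regs() implementation removed here (the ethtool ops are dropped from this file; the netdev now gets them via cxgb4_set_ethtool_ops() further down) consumes the range table two entries at a time: each (start, end) pair is an inclusive window of 32-bit registers, dumped into a zeroed buffer of T4_REGMAP_SIZE or T5_REGMAP_SIZE bytes. A minimal sketch of that pattern, assuming a hypothetical read_reg32() accessor in place of the driver's reg_block_dump() helper:

/* Sketch only: dump_reg_ranges() and read_reg32() are illustrative names,
 * not part of the driver.  Each pair in ranges[] is an inclusive window.
 */
static void dump_reg_ranges(struct adapter *ap, u32 *buf,
			    const unsigned int *ranges, int nranges)
{
	int i;

	for (i = 0; i < nranges; i += 2) {
		unsigned int addr;

		/* registers are 4 bytes wide; store each at its natural
		 * offset in the (zeroed) register map buffer
		 */
		for (addr = ranges[i]; addr <= ranges[i + 1]; addr += 4)
			buf[addr / 4] = read_reg32(ap, addr);
	}
}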
-
-static int restart_autoneg(struct net_device *dev)
-{
-       struct port_info *p = netdev_priv(dev);
-
-       if (!netif_running(dev))
-               return -EAGAIN;
-       if (p->link_cfg.autoneg != AUTONEG_ENABLE)
-               return -EINVAL;
-       t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
-       return 0;
-}
-
-static int identify_port(struct net_device *dev,
-                        enum ethtool_phys_id_state state)
-{
-       unsigned int val;
-       struct adapter *adap = netdev2adap(dev);
-
-       if (state == ETHTOOL_ID_ACTIVE)
-               val = 0xffff;
-       else if (state == ETHTOOL_ID_INACTIVE)
-               val = 0;
-       else
-               return -EINVAL;
-
-       return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
-}
-
-static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
-{
-       unsigned int v = 0;
-
-       if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
-           type == FW_PORT_TYPE_BT_XAUI) {
-               v |= SUPPORTED_TP;
-               if (caps & FW_PORT_CAP_SPEED_100M)
-                       v |= SUPPORTED_100baseT_Full;
-               if (caps & FW_PORT_CAP_SPEED_1G)
-                       v |= SUPPORTED_1000baseT_Full;
-               if (caps & FW_PORT_CAP_SPEED_10G)
-                       v |= SUPPORTED_10000baseT_Full;
-       } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
-               v |= SUPPORTED_Backplane;
-               if (caps & FW_PORT_CAP_SPEED_1G)
-                       v |= SUPPORTED_1000baseKX_Full;
-               if (caps & FW_PORT_CAP_SPEED_10G)
-                       v |= SUPPORTED_10000baseKX4_Full;
-       } else if (type == FW_PORT_TYPE_KR)
-               v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
-       else if (type == FW_PORT_TYPE_BP_AP)
-               v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
-                    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
-       else if (type == FW_PORT_TYPE_BP4_AP)
-               v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
-                    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
-                    SUPPORTED_10000baseKX4_Full;
-       else if (type == FW_PORT_TYPE_FIBER_XFI ||
-                type == FW_PORT_TYPE_FIBER_XAUI ||
-                type == FW_PORT_TYPE_SFP ||
-                type == FW_PORT_TYPE_QSFP_10G ||
-                type == FW_PORT_TYPE_QSA) {
-               v |= SUPPORTED_FIBRE;
-               if (caps & FW_PORT_CAP_SPEED_1G)
-                       v |= SUPPORTED_1000baseT_Full;
-               if (caps & FW_PORT_CAP_SPEED_10G)
-                       v |= SUPPORTED_10000baseT_Full;
-       } else if (type == FW_PORT_TYPE_BP40_BA ||
-                  type == FW_PORT_TYPE_QSFP) {
-               v |= SUPPORTED_40000baseSR4_Full;
-               v |= SUPPORTED_FIBRE;
-       }
-
-       if (caps & FW_PORT_CAP_ANEG)
-               v |= SUPPORTED_Autoneg;
-       return v;
-}
-
-static unsigned int to_fw_linkcaps(unsigned int caps)
-{
-       unsigned int v = 0;
-
-       if (caps & ADVERTISED_100baseT_Full)
-               v |= FW_PORT_CAP_SPEED_100M;
-       if (caps & ADVERTISED_1000baseT_Full)
-               v |= FW_PORT_CAP_SPEED_1G;
-       if (caps & ADVERTISED_10000baseT_Full)
-               v |= FW_PORT_CAP_SPEED_10G;
-       if (caps & ADVERTISED_40000baseSR4_Full)
-               v |= FW_PORT_CAP_SPEED_40G;
-       return v;
-}
-
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       const struct port_info *p = netdev_priv(dev);
-
-       if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
-           p->port_type == FW_PORT_TYPE_BT_XFI ||
-           p->port_type == FW_PORT_TYPE_BT_XAUI)
-               cmd->port = PORT_TP;
-       else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
-                p->port_type == FW_PORT_TYPE_FIBER_XAUI)
-               cmd->port = PORT_FIBRE;
-       else if (p->port_type == FW_PORT_TYPE_SFP ||
-                p->port_type == FW_PORT_TYPE_QSFP_10G ||
-                p->port_type == FW_PORT_TYPE_QSA ||
-                p->port_type == FW_PORT_TYPE_QSFP) {
-               if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
-                   p->mod_type == FW_PORT_MOD_TYPE_SR ||
-                   p->mod_type == FW_PORT_MOD_TYPE_ER ||
-                   p->mod_type == FW_PORT_MOD_TYPE_LRM)
-                       cmd->port = PORT_FIBRE;
-               else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
-                        p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
-                       cmd->port = PORT_DA;
-               else
-                       cmd->port = PORT_OTHER;
-       } else
-               cmd->port = PORT_OTHER;
-
-       if (p->mdio_addr >= 0) {
-               cmd->phy_address = p->mdio_addr;
-               cmd->transceiver = XCVR_EXTERNAL;
-               cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
-                       MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
-       } else {
-               cmd->phy_address = 0;  /* not really, but no better option */
-               cmd->transceiver = XCVR_INTERNAL;
-               cmd->mdio_support = 0;
-       }
-
-       cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
-       cmd->advertising = from_fw_linkcaps(p->port_type,
-                                           p->link_cfg.advertising);
-       ethtool_cmd_speed_set(cmd,
-                             netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
-       cmd->duplex = DUPLEX_FULL;
-       cmd->autoneg = p->link_cfg.autoneg;
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 0;
-       return 0;
-}
-
-static unsigned int speed_to_caps(int speed)
-{
-       if (speed == 100)
-               return FW_PORT_CAP_SPEED_100M;
-       if (speed == 1000)
-               return FW_PORT_CAP_SPEED_1G;
-       if (speed == 10000)
-               return FW_PORT_CAP_SPEED_10G;
-       if (speed == 40000)
-               return FW_PORT_CAP_SPEED_40G;
-       return 0;
-}
-
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       unsigned int cap;
-       struct port_info *p = netdev_priv(dev);
-       struct link_config *lc = &p->link_cfg;
-       u32 speed = ethtool_cmd_speed(cmd);
-
-       if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
-               return -EINVAL;
-
-       if (!(lc->supported & FW_PORT_CAP_ANEG)) {
-               /*
-                * PHY offers a single speed.  See if that's what's
-                * being requested.
-                */
-               if (cmd->autoneg == AUTONEG_DISABLE &&
-                   (lc->supported & speed_to_caps(speed)))
-                       return 0;
-               return -EINVAL;
-       }
-
-       if (cmd->autoneg == AUTONEG_DISABLE) {
-               cap = speed_to_caps(speed);
-
-               if (!(lc->supported & cap) ||
-                   (speed == 1000) ||
-                   (speed == 10000) ||
-                   (speed == 40000))
-                       return -EINVAL;
-               lc->requested_speed = cap;
-               lc->advertising = 0;
-       } else {
-               cap = to_fw_linkcaps(cmd->advertising);
-               if (!(lc->supported & cap))
-                       return -EINVAL;
-               lc->requested_speed = 0;
-               lc->advertising = cap | FW_PORT_CAP_ANEG;
-       }
-       lc->autoneg = cmd->autoneg;
-
-       if (netif_running(dev))
-               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
-                                    lc);
-       return 0;
-}
-
-static void get_pauseparam(struct net_device *dev,
-                          struct ethtool_pauseparam *epause)
-{
-       struct port_info *p = netdev_priv(dev);
-
-       epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-       epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
-       epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
-}
-
-static int set_pauseparam(struct net_device *dev,
-                         struct ethtool_pauseparam *epause)
-{
-       struct port_info *p = netdev_priv(dev);
-       struct link_config *lc = &p->link_cfg;
-
-       if (epause->autoneg == AUTONEG_DISABLE)
-               lc->requested_fc = 0;
-       else if (lc->supported & FW_PORT_CAP_ANEG)
-               lc->requested_fc = PAUSE_AUTONEG;
-       else
-               return -EINVAL;
-
-       if (epause->rx_pause)
-               lc->requested_fc |= PAUSE_RX;
-       if (epause->tx_pause)
-               lc->requested_fc |= PAUSE_TX;
-       if (netif_running(dev))
-               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
-                                    lc);
-       return 0;
-}
-
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
-{
-       const struct port_info *pi = netdev_priv(dev);
-       const struct sge *s = &pi->adapter->sge;
-
-       e->rx_max_pending = MAX_RX_BUFFERS;
-       e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
-       e->rx_jumbo_max_pending = 0;
-       e->tx_max_pending = MAX_TXQ_ENTRIES;
-
-       e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
-       e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
-       e->rx_jumbo_pending = 0;
-       e->tx_pending = s->ethtxq[pi->first_qset].q.size;
-}
-
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
-{
-       int i;
-       const struct port_info *pi = netdev_priv(dev);
-       struct adapter *adapter = pi->adapter;
-       struct sge *s = &adapter->sge;
-
-       if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
-           e->tx_pending > MAX_TXQ_ENTRIES ||
-           e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
-           e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
-           e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
-               return -EINVAL;
-
-       if (adapter->flags & FULL_INIT_DONE)
-               return -EBUSY;
-
-       for (i = 0; i < pi->nqsets; ++i) {
-               s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
-               s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
-               s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
-       }
-       return 0;
-}
-
 static int closest_timer(const struct sge *s, int time)
 {
        int i, delta, match = 0, min_delta = INT_MAX;
@@ -2517,19 +1351,8 @@ static int closest_thres(const struct sge *s, int thres)
        return match;
 }
 
-/*
- * Return a queue's interrupt hold-off time in us.  0 means no timer.
- */
-unsigned int qtimer_val(const struct adapter *adap,
-                       const struct sge_rspq *q)
-{
-       unsigned int idx = q->intr_params >> 1;
-
-       return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
-}
-
 /**
- *     set_rspq_intr_params - set a queue's interrupt holdoff parameters
+ *     cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
  *     @q: the Rx queue
  *     @us: the hold-off time in us, or 0 to disable timer
  *     @cnt: the hold-off packet count, or 0 to disable counter
@@ -2537,8 +1360,8 @@ unsigned int qtimer_val(const struct adapter *adap,
  *     Sets an Rx queue's interrupt hold-off time and packet count.  At least
  *     one of the two needs to be enabled for the queue to generate interrupts.
  */
-static int set_rspq_intr_params(struct sge_rspq *q,
-                               unsigned int us, unsigned int cnt)
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
+                              unsigned int us, unsigned int cnt)
 {
        struct adapter *adap = q->adap;
 
@@ -2569,259 +1392,6 @@ static int set_rspq_intr_params(struct sge_rspq *q,
        return 0;
 }
 
-/**
- * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
- * @dev: the network device
- * @us: the hold-off time in us, or 0 to disable timer
- * @cnt: the hold-off packet count, or 0 to disable counter
- *
- * Set the RX interrupt hold-off parameters for a network device.
- */
-static int set_rx_intr_params(struct net_device *dev,
-                             unsigned int us, unsigned int cnt)
-{
-       int i, err;
-       struct port_info *pi = netdev_priv(dev);
-       struct adapter *adap = pi->adapter;
-       struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
-       for (i = 0; i < pi->nqsets; i++, q++) {
-               err = set_rspq_intr_params(&q->rspq, us, cnt);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
-{
-       int i;
-       struct port_info *pi = netdev_priv(dev);
-       struct adapter *adap = pi->adapter;
-       struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
-       for (i = 0; i < pi->nqsets; i++, q++)
-               q->rspq.adaptive_rx = adaptive_rx;
-
-       return 0;
-}
-
-static int get_adaptive_rx_setting(struct net_device *dev)
-{
-       struct port_info *pi = netdev_priv(dev);
-       struct adapter *adap = pi->adapter;
-       struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
-       return q->rspq.adaptive_rx;
-}
-
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
-       set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
-       return set_rx_intr_params(dev, c->rx_coalesce_usecs,
-                                 c->rx_max_coalesced_frames);
-}
-
-static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
-       const struct port_info *pi = netdev_priv(dev);
-       const struct adapter *adap = pi->adapter;
-       const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
-
-       c->rx_coalesce_usecs = qtimer_val(adap, rq);
-       c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
-               adap->sge.counter_val[rq->pktcnt_idx] : 0;
-       c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
-       return 0;
-}
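As qtimer_val() and get_coalesce() above show, a response queue's coalescing state is packed into intr_params: bit 0 (QINTR_CNT_EN) says whether the hold-off packet counter is armed, and the remaining bits index adap->sge.timer_val[] for the hold-off time. A hedged decode sketch of that encoding (decode_holdoff() is illustrative, not a driver function):

/* Illustrative decode of intr_params, mirroring the removed helpers above */
static void decode_holdoff(const struct adapter *adap,
			   const struct sge_rspq *q,
			   unsigned int *usecs, unsigned int *frames)
{
	unsigned int idx = q->intr_params >> 1;		/* timer index */

	*usecs = idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
	*frames = (q->intr_params & QINTR_CNT_EN) ?
		  adap->sge.counter_val[q->pktcnt_idx] : 0;
}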
-
-/**
- *     eeprom_ptov - translate a physical EEPROM address to virtual
- *     @phys_addr: the physical EEPROM address
- *     @fn: the PCI function number
- *     @sz: size of function-specific area
- *
- *     Translate a physical EEPROM address to virtual.  The first 1K is
- *     accessed through virtual addresses starting at 31K, the rest is
- *     accessed through virtual addresses starting at 0.
- *
- *     The mapping is as follows:
- *     [0..1K) -> [31K..32K)
- *     [1K..1K+A) -> [31K-A..31K)
- *     [1K+A..ES) -> [0..ES-A-1K)
- *
- *     where A = @fn * @sz, and ES = EEPROM size.
- */
-static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
-{
-       fn *= sz;
-       if (phys_addr < 1024)
-               return phys_addr + (31 << 10);
-       if (phys_addr < 1024 + fn)
-               return 31744 - fn + phys_addr - 1024;
-       if (phys_addr < EEPROMSIZE)
-               return phys_addr - 1024 - fn;
-       return -EINVAL;
-}
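A quick worked example of the translation above, using illustrative values fn = 1 and sz = 1024 (so A = 1024):

/* eeprom_ptov(phys, 1, 1024) with A = 1024:
 *   phys    0 -> 31744   ([0..1K)     maps to [31K..32K))
 *   phys 1023 -> 32767
 *   phys 1024 -> 30720   ([1K..1K+A)  maps to [31K-A..31K))
 *   phys 2048 ->     0   ([1K+A..ES)  maps to [0..ES-A-1K))
 */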
-
-/*
- * The next two routines implement eeprom read/write from physical addresses.
- */
-static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
-{
-       int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
-
-       if (vaddr >= 0)
-               vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
-       return vaddr < 0 ? vaddr : 0;
-}
-
-static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
-{
-       int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
-
-       if (vaddr >= 0)
-               vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
-       return vaddr < 0 ? vaddr : 0;
-}
-
-#define EEPROM_MAGIC 0x38E2F10C
-
-static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
-                     u8 *data)
-{
-       int i, err = 0;
-       struct adapter *adapter = netdev2adap(dev);
-
-       u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       e->magic = EEPROM_MAGIC;
-       for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
-               err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
-
-       if (!err)
-               memcpy(data, buf + e->offset, e->len);
-       kfree(buf);
-       return err;
-}
-
-static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
-                     u8 *data)
-{
-       u8 *buf;
-       int err = 0;
-       u32 aligned_offset, aligned_len, *p;
-       struct adapter *adapter = netdev2adap(dev);
-
-       if (eeprom->magic != EEPROM_MAGIC)
-               return -EINVAL;
-
-       aligned_offset = eeprom->offset & ~3;
-       aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
-
-       if (adapter->fn > 0) {
-               u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
-
-               if (aligned_offset < start ||
-                   aligned_offset + aligned_len > start + EEPROMPFSIZE)
-                       return -EPERM;
-       }
-
-       if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
-               /*
-                * RMW possibly needed for first or last words.
-                */
-               buf = kmalloc(aligned_len, GFP_KERNEL);
-               if (!buf)
-                       return -ENOMEM;
-               err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
-               if (!err && aligned_len > 4)
-                       err = eeprom_rd_phys(adapter,
-                                            aligned_offset + aligned_len - 4,
-                                            (u32 *)&buf[aligned_len - 4]);
-               if (err)
-                       goto out;
-               memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
-       } else
-               buf = data;
-
-       err = t4_seeprom_wp(adapter, false);
-       if (err)
-               goto out;
-
-       for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
-               err = eeprom_wr_phys(adapter, aligned_offset, *p);
-               aligned_offset += 4;
-       }
-
-       if (!err)
-               err = t4_seeprom_wp(adapter, true);
-out:
-       if (buf != data)
-               kfree(buf);
-       return err;
-}
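As a concrete illustration of the alignment logic above (the numbers are made up): a 5-byte write at offset 0x107 gives aligned_offset = 0x107 & ~3 = 0x104 and aligned_len = (5 + 3 + 3) & ~3 = 8, so the first and last 32-bit words are read back from the EEPROM, the payload is copied in starting at byte (0x107 & 3) = 3 of the bounce buffer, and both words are then written out.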
-
-static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
-{
-       int ret;
-       const struct firmware *fw;
-       struct adapter *adap = netdev2adap(netdev);
-       unsigned int mbox = PCIE_FW_MASTER_M + 1;
-
-       ef->data[sizeof(ef->data) - 1] = '\0';
-       ret = request_firmware(&fw, ef->data, adap->pdev_dev);
-       if (ret < 0)
-               return ret;
-
-       /* If the adapter has been fully initialized then we'll go ahead and
-        * try to get the firmware's cooperation in upgrading to the new
-        * firmware image otherwise we'll try to do the entire job from the
-        * host ... and we always "force" the operation in this path.
-        */
-       if (adap->flags & FULL_INIT_DONE)
-               mbox = adap->mbox;
-
-       ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
-       release_firmware(fw);
-       if (!ret)
-               dev_info(adap->pdev_dev, "loaded firmware %s,"
-                        " reload cxgb4 driver\n", ef->data);
-       return ret;
-}
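For context, flash_device is the hook behind ethtool's firmware-flash command (roughly "ethtool -f <dev> <image-name>"); the name in ef->data is handed to request_firmware(), so it is resolved against the kernel's firmware search path rather than being an absolute file path.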
-
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       wol->supported = WAKE_BCAST | WAKE_MAGIC;
-       wol->wolopts = netdev2adap(dev)->wol;
-       memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       int err = 0;
-       struct port_info *pi = netdev_priv(dev);
-
-       if (wol->wolopts & ~WOL_SUPPORTED)
-               return -EINVAL;
-       t4_wol_magic_enable(pi->adapter, pi->tx_chan,
-                           (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
-       if (wol->wolopts & WAKE_BCAST) {
-               err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
-                                       ~0ULL, 0, false);
-               if (!err)
-                       err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
-                                               ~6ULL, ~0ULL, BCAST_CRC, true);
-       } else
-               t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
-       return err;
-}
-
 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
        const struct port_info *pi = netdev_priv(dev);
@@ -2839,144 +1409,6 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
        return err;
 }
 
-static u32 get_rss_table_size(struct net_device *dev)
-{
-       const struct port_info *pi = netdev_priv(dev);
-
-       return pi->rss_size;
-}
-
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
-{
-       const struct port_info *pi = netdev_priv(dev);
-       unsigned int n = pi->rss_size;
-
-       if (hfunc)
-               *hfunc = ETH_RSS_HASH_TOP;
-       if (!p)
-               return 0;
-       while (n--)
-               p[n] = pi->rss[n];
-       return 0;
-}
-
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
-                        const u8 hfunc)
-{
-       unsigned int i;
-       struct port_info *pi = netdev_priv(dev);
-
-       /* We require at least one supported parameter to be changed and no
-        * change in any of the unsupported parameters
-        */
-       if (key ||
-           (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
-               return -EOPNOTSUPP;
-       if (!p)
-               return 0;
-
-       for (i = 0; i < pi->rss_size; i++)
-               pi->rss[i] = p[i];
-       if (pi->adapter->flags & FULL_INIT_DONE)
-               return write_rss(pi, pi->rss);
-       return 0;
-}
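These callbacks back the standard RSS indirection-table controls (roughly "ethtool -x <dev>" to read the table and "ethtool -X <dev> equal N", or an explicit weight list, to rewrite it); note that changes are only pushed to hardware via write_rss() once FULL_INIT_DONE is set.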
-
-static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
-                    u32 *rules)
-{
-       const struct port_info *pi = netdev_priv(dev);
-
-       switch (info->cmd) {
-       case ETHTOOL_GRXFH: {
-               unsigned int v = pi->rss_mode;
-
-               info->data = 0;
-               switch (info->flow_type) {
-               case TCP_V4_FLOW:
-                       if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST |
-                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                       else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               case UDP_V4_FLOW:
-                       if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
-                           (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
-                               info->data = RXH_IP_SRC | RXH_IP_DST |
-                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                       else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               case SCTP_V4_FLOW:
-               case AH_ESP_V4_FLOW:
-               case IPV4_FLOW:
-                       if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               case TCP_V6_FLOW:
-                       if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST |
-                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                       else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               case UDP_V6_FLOW:
-                       if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
-                           (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
-                               info->data = RXH_IP_SRC | RXH_IP_DST |
-                                            RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                       else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               case SCTP_V6_FLOW:
-               case AH_ESP_V6_FLOW:
-               case IPV6_FLOW:
-                       if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
-                               info->data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               }
-               return 0;
-       }
-       case ETHTOOL_GRXRINGS:
-               info->data = pi->nqsets;
-               return 0;
-       }
-       return -EOPNOTSUPP;
-}
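The ETHTOOL_GRXFH branch above is what userspace sees for queries like "ethtool -n <dev> rx-flow-hash tcp4": depending on the VI's rss_mode it reports hashing over IP source/destination only (two-tuple) or IP plus the L4 port bytes (four-tuple), with UDP additionally gated on FW_RSS_VI_CONFIG_CMD_UDPEN_F.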
-
-static const struct ethtool_ops cxgb_ethtool_ops = {
-       .get_settings      = get_settings,
-       .set_settings      = set_settings,
-       .get_drvinfo       = get_drvinfo,
-       .get_msglevel      = get_msglevel,
-       .set_msglevel      = set_msglevel,
-       .get_ringparam     = get_sge_param,
-       .set_ringparam     = set_sge_param,
-       .get_coalesce      = get_coalesce,
-       .set_coalesce      = set_coalesce,
-       .get_eeprom_len    = get_eeprom_len,
-       .get_eeprom        = get_eeprom,
-       .set_eeprom        = set_eeprom,
-       .get_pauseparam    = get_pauseparam,
-       .set_pauseparam    = set_pauseparam,
-       .get_link          = ethtool_op_get_link,
-       .get_strings       = get_strings,
-       .set_phys_id       = identify_port,
-       .nway_reset        = restart_autoneg,
-       .get_sset_count    = get_sset_count,
-       .get_ethtool_stats = get_stats,
-       .get_regs_len      = get_regs_len,
-       .get_regs          = get_regs,
-       .get_wol           = get_wol,
-       .set_wol           = set_wol,
-       .get_rxnfc         = get_rxnfc,
-       .get_rxfh_indir_size = get_rss_table_size,
-       .get_rxfh          = get_rss_table,
-       .set_rxfh          = set_rss_table,
-       .flash_device      = set_flash,
-};
-
 static int setup_debugfs(struct adapter *adap)
 {
        if (IS_ERR_OR_NULL(adap->debugfs_root))
@@ -4246,19 +2678,12 @@ static int cxgb_up(struct adapter *adap)
 
 static void cxgb_down(struct adapter *adapter)
 {
-       t4_intr_disable(adapter);
        cancel_work_sync(&adapter->tid_release_task);
        cancel_work_sync(&adapter->db_full_task);
        cancel_work_sync(&adapter->db_drop_task);
        adapter->tid_release_task_busy = false;
        adapter->tid_release_head = NULL;
 
-       if (adapter->flags & USING_MSIX) {
-               free_msix_queue_irqs(adapter);
-               free_irq(adapter->msix_info[0].vec, adapter);
-       } else
-               free_irq(adapter->pdev->irq, adapter);
-       quiesce_rx(adapter);
        t4_sge_stop(adapter);
        t4_free_sge_resources(adapter);
        adapter->flags &= ~FULL_INIT_DONE;
@@ -4739,8 +3164,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        if (ret < 0)
                return ret;
 
-       ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
-                         0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+       ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+                         MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+                         FW_CMD_CAP_PF);
        if (ret < 0)
                return ret;
 
@@ -5094,10 +3520,15 @@ static int adap_init0(struct adapter *adap)
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
-       struct fw_devlog_cmd devlog_cmd;
-       u32 devlog_meminfo;
        int reset = 1;
 
+       /* Grab Firmware Device Log parameters as early as possible so we have
+        * access to it for debugging, etc.
+        */
+       ret = t4_init_devlog_params(adap);
+       if (ret < 0)
+               return ret;
+
        /* Contact FW, advertising Master capability */
        ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
        if (ret < 0) {
@@ -5175,30 +3606,6 @@ static int adap_init0(struct adapter *adap)
        if (ret < 0)
                goto bye;
 
-       /* Read firmware device log parameters.  We really need to find a way
-        * to get these parameters initialized with some default values (which
-        * are likely to be correct) for the case where we either don't
-        * attache to the firmware or it's crashed when we probe the adapter.
-        * That way we'll still be able to perform early firmware startup
-        * debugging ...  If the request to get the Firmware's Device Log
-        * parameters fails, we'll live so we don't make that a fatal error.
-        */
-       memset(&devlog_cmd, 0, sizeof(devlog_cmd));
-       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
-                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
-       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
-       ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
-                        &devlog_cmd);
-       if (ret == 0) {
-               devlog_meminfo =
-                       ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
-               adap->params.devlog.memtype =
-                       FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
-               adap->params.devlog.start =
-                       FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
-               adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
-       }
-
        /*
         * Find out what ports are available to us.  Note that we need to do
         * this before calling adap_init0_no_config() since it needs nports
@@ -5299,6 +3706,51 @@ static int adap_init0(struct adapter *adap)
        adap->tids.nftids = val[4] - val[3] + 1;
        adap->sge.ingr_start = val[5];
 
+       /* qids (ingress/egress) returned from firmware can be anywhere
+        * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+        * Hence driver needs to allocate memory for this range to
+        * store the queue info. Get the highest IQFLINT/EQ index returned
+        * in FW_EQ_*_CMD.alloc command.
+        */
+       params[0] = FW_PARAM_PFVF(EQ_END);
+       params[1] = FW_PARAM_PFVF(IQFLINT_END);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       if (ret < 0)
+               goto bye;
+       adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+       adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+       adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+                                   sizeof(*adap->sge.egr_map), GFP_KERNEL);
+       if (!adap->sge.egr_map) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
+       adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+                                    sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+       if (!adap->sge.ingr_map) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
+       /* Allocate the memory for the various egress queue bitmaps,
+        * i.e. starving_fl and txq_maperr.
+        */
+       adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                       sizeof(long), GFP_KERNEL);
+       if (!adap->sge.starving_fl) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
+       adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                      sizeof(long), GFP_KERNEL);
+       if (!adap->sge.txq_maperr) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+
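Sizing note for the two bitmaps just allocated: they carry one bit per egress queue ID, so BITS_TO_LONGS(egr_sz) longs suffice. For example (illustrative numbers), with egr_sz == 1000 on a 64-bit kernel, BITS_TO_LONGS(1000) == DIV_ROUND_UP(1000, 64) == 16 longs, i.e. 128 bytes per bitmap; the SGE timer callbacks later in this patch switch from ARRAY_SIZE() to scanning exactly BITS_TO_LONGS(s->egr_sz) words to match.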
        params[0] = FW_PARAM_PFVF(CLIP_START);
        params[1] = FW_PARAM_PFVF(CLIP_END);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5507,6 +3959,10 @@ static int adap_init0(struct adapter *adap)
         * happened to HW/FW, stop issuing commands.
         */
 bye:
+       kfree(adap->sge.egr_map);
+       kfree(adap->sge.ingr_map);
+       kfree(adap->sge.starving_fl);
+       kfree(adap->sge.txq_maperr);
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
        return ret;
@@ -5534,6 +3990,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
                netif_carrier_off(dev);
        }
        spin_unlock(&adap->stats_lock);
+       disable_interrupts(adap);
        if (adap->flags & FULL_INIT_DONE)
                cxgb_down(adap);
        rtnl_unlock();
@@ -5636,7 +4093,7 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
                             unsigned int size, unsigned int iqe_size)
 {
        q->adap = adap;
-       set_rspq_intr_params(q, us, cnt);
+       cxgb4_set_rspq_intr_params(q, us, cnt);
        q->iqe_len = iqe_size;
        q->size = size;
 }
@@ -5942,6 +4399,10 @@ static void free_some_resources(struct adapter *adapter)
 
        t4_free_mem(adapter->l2t);
        t4_free_mem(adapter->tids.tid_tab);
+       kfree(adapter->sge.egr_map);
+       kfree(adapter->sge.ingr_map);
+       kfree(adapter->sge.starving_fl);
+       kfree(adapter->sge.txq_maperr);
        disable_msi(adapter);
 
        for_each_port(adapter, i)
@@ -6127,7 +4588,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->dcbnl_ops = &cxgb4_dcb_ops;
                cxgb4_dcb_state_init(netdev);
 #endif
-               netdev->ethtool_ops = &cxgb_ethtool_ops;
+               cxgb4_set_ethtool_ops(netdev);
        }
 
        pci_set_drvdata(pdev, adapter);
@@ -6267,6 +4728,8 @@ static void remove_one(struct pci_dev *pdev)
                if (is_offload(adapter))
                        detach_ulds(adapter);
 
+               disable_interrupts(adapter);
+
                for_each_port(adapter, i)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);
index c46e7a9383179b9874566f2a993ab83385bb2437..e622214e2eca03266235de22adbd5412b0b7f4d3 100644 (file)
@@ -1968,7 +1968,7 @@ static int process_responses(struct sge_rspq *q, int budget)
                if (!is_new_response(rc, q))
                        break;
 
-               rmb();
+               dma_rmb();
                rsp_type = RSPD_TYPE(rc->type_gen);
                if (likely(rsp_type == RSP_TYPE_FLBUF)) {
                        struct page_frag *fp;
@@ -2160,7 +2160,7 @@ static unsigned int process_intrq(struct adapter *adap)
                if (!is_new_response(rc, q))
                        break;
 
-               rmb();
+               dma_rmb();
                if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
                        unsigned int qid = ntohl(rc->pldbuflen_qid);
 
@@ -2239,7 +2239,7 @@ static void sge_rx_timer_cb(unsigned long data)
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;
 
-       for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+       for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
                for (m = s->starving_fl[i]; m; m &= m - 1) {
                        struct sge_eth_rxq *rxq;
                        unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@@ -2327,7 +2327,7 @@ static void sge_tx_timer_cb(unsigned long data)
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;
 
-       for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+       for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
                for (m = s->txq_maperr[i]; m; m &= m - 1) {
                        unsigned long id = __ffs(m) + i * BITS_PER_LONG;
                        struct sge_ofld_txq *txq = s->egr_map[id];
@@ -2809,7 +2809,8 @@ void t4_free_sge_resources(struct adapter *adap)
                free_rspq_fl(adap, &adap->sge.intrq, NULL);
 
        /* clear the reverse egress queue map */
-       memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+       memset(adap->sge.egr_map, 0,
+              adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
 }
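The explicit length here is needed because egr_map is now a kcalloc()'d pointer rather than a fixed-size array member, so the old sizeof(adap->sge.egr_map) would only measure the pointer itself; adap->sge.egr_sz * sizeof(*adap->sge.egr_map) gives the real table size.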
 
 void t4_sge_start(struct adapter *adap)
index afbe1682ff4876d147fd28b434b9be8a28d05299..5959e3ae72da213e11587e8cd27a2bc9759755d0 100644 (file)
@@ -625,6 +625,734 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
        return 0;
 }
 
+/**
+ *     t4_get_regs_len - return the size of the chips register set
+ *     @adapter: the adapter
+ *
+ *     Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+       unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+       switch (chip_version) {
+       case CHELSIO_T4:
+               return T4_REGMAP_SIZE;
+
+       case CHELSIO_T5:
+               return T5_REGMAP_SIZE;
+       }
+
+       dev_err(adapter->pdev_dev,
+               "Unsupported chip version %d\n", chip_version);
+       return 0;
+}
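One plausible way the relocated ethtool code might wrap this helper is a thin get_regs_len callback; the sketch below is an assumption for illustration (the wrapper name is invented), and only t4_get_regs_len() and netdev2adap() come from the driver:

/* Hypothetical ethtool-side wrapper; not taken from this patch */
static int cxgb_get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_get_regs_len(adap);
}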
+
+/**
+ *     t4_get_regs - read chip registers into provided buffer
+ *     @adap: the adapter
+ *     @buf: register buffer
+ *     @buf_size: size (in bytes) of register buffer
+ *
+ *     If the provided register buffer isn't large enough for the chip's
+ *     full register range, the register dump will be truncated to the
+ *     register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+{
+       static const unsigned int t4_reg_ranges[] = {
+               0x1008, 0x1108,
+               0x1180, 0x11b4,
+               0x11fc, 0x123c,
+               0x1300, 0x173c,
+               0x1800, 0x18fc,
+               0x3000, 0x30d8,
+               0x30e0, 0x5924,
+               0x5960, 0x59d4,
+               0x5a00, 0x5af8,
+               0x6000, 0x6098,
+               0x6100, 0x6150,
+               0x6200, 0x6208,
+               0x6240, 0x6248,
+               0x6280, 0x6338,
+               0x6370, 0x638c,
+               0x6400, 0x643c,
+               0x6500, 0x6524,
+               0x6a00, 0x6a38,
+               0x6a60, 0x6a78,
+               0x6b00, 0x6b84,
+               0x6bf0, 0x6c84,
+               0x6cf0, 0x6d84,
+               0x6df0, 0x6e84,
+               0x6ef0, 0x6f84,
+               0x6ff0, 0x7084,
+               0x70f0, 0x7184,
+               0x71f0, 0x7284,
+               0x72f0, 0x7384,
+               0x73f0, 0x7450,
+               0x7500, 0x7530,
+               0x7600, 0x761c,
+               0x7680, 0x76cc,
+               0x7700, 0x7798,
+               0x77c0, 0x77fc,
+               0x7900, 0x79fc,
+               0x7b00, 0x7c38,
+               0x7d00, 0x7efc,
+               0x8dc0, 0x8e1c,
+               0x8e30, 0x8e78,
+               0x8ea0, 0x8f6c,
+               0x8fc0, 0x9074,
+               0x90fc, 0x90fc,
+               0x9400, 0x9458,
+               0x9600, 0x96bc,
+               0x9800, 0x9808,
+               0x9820, 0x983c,
+               0x9850, 0x9864,
+               0x9c00, 0x9c6c,
+               0x9c80, 0x9cec,
+               0x9d00, 0x9d6c,
+               0x9d80, 0x9dec,
+               0x9e00, 0x9e6c,
+               0x9e80, 0x9eec,
+               0x9f00, 0x9f6c,
+               0x9f80, 0x9fec,
+               0xd004, 0xd03c,
+               0xdfc0, 0xdfe0,
+               0xe000, 0xea7c,
+               0xf000, 0x11110,
+               0x11118, 0x11190,
+               0x19040, 0x1906c,
+               0x19078, 0x19080,
+               0x1908c, 0x19124,
+               0x19150, 0x191b0,
+               0x191d0, 0x191e8,
+               0x19238, 0x1924c,
+               0x193f8, 0x19474,
+               0x19490, 0x194f8,
+               0x19800, 0x19f30,
+               0x1a000, 0x1a06c,
+               0x1a0b0, 0x1a120,
+               0x1a128, 0x1a138,
+               0x1a190, 0x1a1c4,
+               0x1a1fc, 0x1a1fc,
+               0x1e040, 0x1e04c,
+               0x1e284, 0x1e28c,
+               0x1e2c0, 0x1e2c0,
+               0x1e2e0, 0x1e2e0,
+               0x1e300, 0x1e384,
+               0x1e3c0, 0x1e3c8,
+               0x1e440, 0x1e44c,
+               0x1e684, 0x1e68c,
+               0x1e6c0, 0x1e6c0,
+               0x1e6e0, 0x1e6e0,
+               0x1e700, 0x1e784,
+               0x1e7c0, 0x1e7c8,
+               0x1e840, 0x1e84c,
+               0x1ea84, 0x1ea8c,
+               0x1eac0, 0x1eac0,
+               0x1eae0, 0x1eae0,
+               0x1eb00, 0x1eb84,
+               0x1ebc0, 0x1ebc8,
+               0x1ec40, 0x1ec4c,
+               0x1ee84, 0x1ee8c,
+               0x1eec0, 0x1eec0,
+               0x1eee0, 0x1eee0,
+               0x1ef00, 0x1ef84,
+               0x1efc0, 0x1efc8,
+               0x1f040, 0x1f04c,
+               0x1f284, 0x1f28c,
+               0x1f2c0, 0x1f2c0,
+               0x1f2e0, 0x1f2e0,
+               0x1f300, 0x1f384,
+               0x1f3c0, 0x1f3c8,
+               0x1f440, 0x1f44c,
+               0x1f684, 0x1f68c,
+               0x1f6c0, 0x1f6c0,
+               0x1f6e0, 0x1f6e0,
+               0x1f700, 0x1f784,
+               0x1f7c0, 0x1f7c8,
+               0x1f840, 0x1f84c,
+               0x1fa84, 0x1fa8c,
+               0x1fac0, 0x1fac0,
+               0x1fae0, 0x1fae0,
+               0x1fb00, 0x1fb84,
+               0x1fbc0, 0x1fbc8,
+               0x1fc40, 0x1fc4c,
+               0x1fe84, 0x1fe8c,
+               0x1fec0, 0x1fec0,
+               0x1fee0, 0x1fee0,
+               0x1ff00, 0x1ff84,
+               0x1ffc0, 0x1ffc8,
+               0x20000, 0x2002c,
+               0x20100, 0x2013c,
+               0x20190, 0x201c8,
+               0x20200, 0x20318,
+               0x20400, 0x20528,
+               0x20540, 0x20614,
+               0x21000, 0x21040,
+               0x2104c, 0x21060,
+               0x210c0, 0x210ec,
+               0x21200, 0x21268,
+               0x21270, 0x21284,
+               0x212fc, 0x21388,
+               0x21400, 0x21404,
+               0x21500, 0x21518,
+               0x2152c, 0x2153c,
+               0x21550, 0x21554,
+               0x21600, 0x21600,
+               0x21608, 0x21628,
+               0x21630, 0x2163c,
+               0x21700, 0x2171c,
+               0x21780, 0x2178c,
+               0x21800, 0x21c38,
+               0x21c80, 0x21d7c,
+               0x21e00, 0x21e04,
+               0x22000, 0x2202c,
+               0x22100, 0x2213c,
+               0x22190, 0x221c8,
+               0x22200, 0x22318,
+               0x22400, 0x22528,
+               0x22540, 0x22614,
+               0x23000, 0x23040,
+               0x2304c, 0x23060,
+               0x230c0, 0x230ec,
+               0x23200, 0x23268,
+               0x23270, 0x23284,
+               0x232fc, 0x23388,
+               0x23400, 0x23404,
+               0x23500, 0x23518,
+               0x2352c, 0x2353c,
+               0x23550, 0x23554,
+               0x23600, 0x23600,
+               0x23608, 0x23628,
+               0x23630, 0x2363c,
+               0x23700, 0x2371c,
+               0x23780, 0x2378c,
+               0x23800, 0x23c38,
+               0x23c80, 0x23d7c,
+               0x23e00, 0x23e04,
+               0x24000, 0x2402c,
+               0x24100, 0x2413c,
+               0x24190, 0x241c8,
+               0x24200, 0x24318,
+               0x24400, 0x24528,
+               0x24540, 0x24614,
+               0x25000, 0x25040,
+               0x2504c, 0x25060,
+               0x250c0, 0x250ec,
+               0x25200, 0x25268,
+               0x25270, 0x25284,
+               0x252fc, 0x25388,
+               0x25400, 0x25404,
+               0x25500, 0x25518,
+               0x2552c, 0x2553c,
+               0x25550, 0x25554,
+               0x25600, 0x25600,
+               0x25608, 0x25628,
+               0x25630, 0x2563c,
+               0x25700, 0x2571c,
+               0x25780, 0x2578c,
+               0x25800, 0x25c38,
+               0x25c80, 0x25d7c,
+               0x25e00, 0x25e04,
+               0x26000, 0x2602c,
+               0x26100, 0x2613c,
+               0x26190, 0x261c8,
+               0x26200, 0x26318,
+               0x26400, 0x26528,
+               0x26540, 0x26614,
+               0x27000, 0x27040,
+               0x2704c, 0x27060,
+               0x270c0, 0x270ec,
+               0x27200, 0x27268,
+               0x27270, 0x27284,
+               0x272fc, 0x27388,
+               0x27400, 0x27404,
+               0x27500, 0x27518,
+               0x2752c, 0x2753c,
+               0x27550, 0x27554,
+               0x27600, 0x27600,
+               0x27608, 0x27628,
+               0x27630, 0x2763c,
+               0x27700, 0x2771c,
+               0x27780, 0x2778c,
+               0x27800, 0x27c38,
+               0x27c80, 0x27d7c,
+               0x27e00, 0x27e04
+       };
+
+       static const unsigned int t5_reg_ranges[] = {
+               0x1008, 0x1148,
+               0x1180, 0x11b4,
+               0x11fc, 0x123c,
+               0x1280, 0x173c,
+               0x1800, 0x18fc,
+               0x3000, 0x3028,
+               0x3060, 0x30d8,
+               0x30e0, 0x30fc,
+               0x3140, 0x357c,
+               0x35a8, 0x35cc,
+               0x35ec, 0x35ec,
+               0x3600, 0x5624,
+               0x56cc, 0x575c,
+               0x580c, 0x5814,
+               0x5890, 0x58bc,
+               0x5940, 0x59dc,
+               0x59fc, 0x5a18,
+               0x5a60, 0x5a9c,
+               0x5b9c, 0x5bfc,
+               0x6000, 0x6040,
+               0x6058, 0x614c,
+               0x7700, 0x7798,
+               0x77c0, 0x78fc,
+               0x7b00, 0x7c54,
+               0x7d00, 0x7efc,
+               0x8dc0, 0x8de0,
+               0x8df8, 0x8e84,
+               0x8ea0, 0x8f84,
+               0x8fc0, 0x90f8,
+               0x9400, 0x9470,
+               0x9600, 0x96f4,
+               0x9800, 0x9808,
+               0x9820, 0x983c,
+               0x9850, 0x9864,
+               0x9c00, 0x9c6c,
+               0x9c80, 0x9cec,
+               0x9d00, 0x9d6c,
+               0x9d80, 0x9dec,
+               0x9e00, 0x9e6c,
+               0x9e80, 0x9eec,
+               0x9f00, 0x9f6c,
+               0x9f80, 0xa020,
+               0xd004, 0xd03c,
+               0xdfc0, 0xdfe0,
+               0xe000, 0x11088,
+               0x1109c, 0x11110,
+               0x11118, 0x1117c,
+               0x11190, 0x11204,
+               0x19040, 0x1906c,
+               0x19078, 0x19080,
+               0x1908c, 0x19124,
+               0x19150, 0x191b0,
+               0x191d0, 0x191e8,
+               0x19238, 0x19290,
+               0x193f8, 0x19474,
+               0x19490, 0x194cc,
+               0x194f0, 0x194f8,
+               0x19c00, 0x19c60,
+               0x19c94, 0x19e10,
+               0x19e50, 0x19f34,
+               0x19f40, 0x19f50,
+               0x19f90, 0x19fe4,
+               0x1a000, 0x1a06c,
+               0x1a0b0, 0x1a120,
+               0x1a128, 0x1a138,
+               0x1a190, 0x1a1c4,
+               0x1a1fc, 0x1a1fc,
+               0x1e008, 0x1e00c,
+               0x1e040, 0x1e04c,
+               0x1e284, 0x1e290,
+               0x1e2c0, 0x1e2c0,
+               0x1e2e0, 0x1e2e0,
+               0x1e300, 0x1e384,
+               0x1e3c0, 0x1e3c8,
+               0x1e408, 0x1e40c,
+               0x1e440, 0x1e44c,
+               0x1e684, 0x1e690,
+               0x1e6c0, 0x1e6c0,
+               0x1e6e0, 0x1e6e0,
+               0x1e700, 0x1e784,
+               0x1e7c0, 0x1e7c8,
+               0x1e808, 0x1e80c,
+               0x1e840, 0x1e84c,
+               0x1ea84, 0x1ea90,
+               0x1eac0, 0x1eac0,
+               0x1eae0, 0x1eae0,
+               0x1eb00, 0x1eb84,
+               0x1ebc0, 0x1ebc8,
+               0x1ec08, 0x1ec0c,
+               0x1ec40, 0x1ec4c,
+               0x1ee84, 0x1ee90,
+               0x1eec0, 0x1eec0,
+               0x1eee0, 0x1eee0,
+               0x1ef00, 0x1ef84,
+               0x1efc0, 0x1efc8,
+               0x1f008, 0x1f00c,
+               0x1f040, 0x1f04c,
+               0x1f284, 0x1f290,
+               0x1f2c0, 0x1f2c0,
+               0x1f2e0, 0x1f2e0,
+               0x1f300, 0x1f384,
+               0x1f3c0, 0x1f3c8,
+               0x1f408, 0x1f40c,
+               0x1f440, 0x1f44c,
+               0x1f684, 0x1f690,
+               0x1f6c0, 0x1f6c0,
+               0x1f6e0, 0x1f6e0,
+               0x1f700, 0x1f784,
+               0x1f7c0, 0x1f7c8,
+               0x1f808, 0x1f80c,
+               0x1f840, 0x1f84c,
+               0x1fa84, 0x1fa90,
+               0x1fac0, 0x1fac0,
+               0x1fae0, 0x1fae0,
+               0x1fb00, 0x1fb84,
+               0x1fbc0, 0x1fbc8,
+               0x1fc08, 0x1fc0c,
+               0x1fc40, 0x1fc4c,
+               0x1fe84, 0x1fe90,
+               0x1fec0, 0x1fec0,
+               0x1fee0, 0x1fee0,
+               0x1ff00, 0x1ff84,
+               0x1ffc0, 0x1ffc8,
+               0x30000, 0x30030,
+               0x30100, 0x30144,
+               0x30190, 0x301d0,
+               0x30200, 0x30318,
+               0x30400, 0x3052c,
+               0x30540, 0x3061c,
+               0x30800, 0x30834,
+               0x308c0, 0x30908,
+               0x30910, 0x309ac,
+               0x30a00, 0x30a04,
+               0x30a0c, 0x30a2c,
+               0x30a44, 0x30a50,
+               0x30a74, 0x30c24,
+               0x30d08, 0x30d14,
+               0x30d1c, 0x30d20,
+               0x30d3c, 0x30d50,
+               0x31200, 0x3120c,
+               0x31220, 0x31220,
+               0x31240, 0x31240,
+               0x31600, 0x31600,
+               0x31608, 0x3160c,
+               0x31a00, 0x31a1c,
+               0x31e04, 0x31e20,
+               0x31e38, 0x31e3c,
+               0x31e80, 0x31e80,
+               0x31e88, 0x31ea8,
+               0x31eb0, 0x31eb4,
+               0x31ec8, 0x31ed4,
+               0x31fb8, 0x32004,
+               0x32208, 0x3223c,
+               0x32600, 0x32630,
+               0x32a00, 0x32abc,
+               0x32b00, 0x32b70,
+               0x33000, 0x33048,
+               0x33060, 0x3309c,
+               0x330f0, 0x33148,
+               0x33160, 0x3319c,
+               0x331f0, 0x332e4,
+               0x332f8, 0x333e4,
+               0x333f8, 0x33448,
+               0x33460, 0x3349c,
+               0x334f0, 0x33548,
+               0x33560, 0x3359c,
+               0x335f0, 0x336e4,
+               0x336f8, 0x337e4,
+               0x337f8, 0x337fc,
+               0x33814, 0x33814,
+               0x3382c, 0x3382c,
+               0x33880, 0x3388c,
+               0x338e8, 0x338ec,
+               0x33900, 0x33948,
+               0x33960, 0x3399c,
+               0x339f0, 0x33ae4,
+               0x33af8, 0x33b10,
+               0x33b28, 0x33b28,
+               0x33b3c, 0x33b50,
+               0x33bf0, 0x33c10,
+               0x33c28, 0x33c28,
+               0x33c3c, 0x33c50,
+               0x33cf0, 0x33cfc,
+               0x34000, 0x34030,
+               0x34100, 0x34144,
+               0x34190, 0x341d0,
+               0x34200, 0x34318,
+               0x34400, 0x3452c,
+               0x34540, 0x3461c,
+               0x34800, 0x34834,
+               0x348c0, 0x34908,
+               0x34910, 0x349ac,
+               0x34a00, 0x34a04,
+               0x34a0c, 0x34a2c,
+               0x34a44, 0x34a50,
+               0x34a74, 0x34c24,
+               0x34d08, 0x34d14,
+               0x34d1c, 0x34d20,
+               0x34d3c, 0x34d50,
+               0x35200, 0x3520c,
+               0x35220, 0x35220,
+               0x35240, 0x35240,
+               0x35600, 0x35600,
+               0x35608, 0x3560c,
+               0x35a00, 0x35a1c,
+               0x35e04, 0x35e20,
+               0x35e38, 0x35e3c,
+               0x35e80, 0x35e80,
+               0x35e88, 0x35ea8,
+               0x35eb0, 0x35eb4,
+               0x35ec8, 0x35ed4,
+               0x35fb8, 0x36004,
+               0x36208, 0x3623c,
+               0x36600, 0x36630,
+               0x36a00, 0x36abc,
+               0x36b00, 0x36b70,
+               0x37000, 0x37048,
+               0x37060, 0x3709c,
+               0x370f0, 0x37148,
+               0x37160, 0x3719c,
+               0x371f0, 0x372e4,
+               0x372f8, 0x373e4,
+               0x373f8, 0x37448,
+               0x37460, 0x3749c,
+               0x374f0, 0x37548,
+               0x37560, 0x3759c,
+               0x375f0, 0x376e4,
+               0x376f8, 0x377e4,
+               0x377f8, 0x377fc,
+               0x37814, 0x37814,
+               0x3782c, 0x3782c,
+               0x37880, 0x3788c,
+               0x378e8, 0x378ec,
+               0x37900, 0x37948,
+               0x37960, 0x3799c,
+               0x379f0, 0x37ae4,
+               0x37af8, 0x37b10,
+               0x37b28, 0x37b28,
+               0x37b3c, 0x37b50,
+               0x37bf0, 0x37c10,
+               0x37c28, 0x37c28,
+               0x37c3c, 0x37c50,
+               0x37cf0, 0x37cfc,
+               0x38000, 0x38030,
+               0x38100, 0x38144,
+               0x38190, 0x381d0,
+               0x38200, 0x38318,
+               0x38400, 0x3852c,
+               0x38540, 0x3861c,
+               0x38800, 0x38834,
+               0x388c0, 0x38908,
+               0x38910, 0x389ac,
+               0x38a00, 0x38a04,
+               0x38a0c, 0x38a2c,
+               0x38a44, 0x38a50,
+               0x38a74, 0x38c24,
+               0x38d08, 0x38d14,
+               0x38d1c, 0x38d20,
+               0x38d3c, 0x38d50,
+               0x39200, 0x3920c,
+               0x39220, 0x39220,
+               0x39240, 0x39240,
+               0x39600, 0x39600,
+               0x39608, 0x3960c,
+               0x39a00, 0x39a1c,
+               0x39e04, 0x39e20,
+               0x39e38, 0x39e3c,
+               0x39e80, 0x39e80,
+               0x39e88, 0x39ea8,
+               0x39eb0, 0x39eb4,
+               0x39ec8, 0x39ed4,
+               0x39fb8, 0x3a004,
+               0x3a208, 0x3a23c,
+               0x3a600, 0x3a630,
+               0x3aa00, 0x3aabc,
+               0x3ab00, 0x3ab70,
+               0x3b000, 0x3b048,
+               0x3b060, 0x3b09c,
+               0x3b0f0, 0x3b148,
+               0x3b160, 0x3b19c,
+               0x3b1f0, 0x3b2e4,
+               0x3b2f8, 0x3b3e4,
+               0x3b3f8, 0x3b448,
+               0x3b460, 0x3b49c,
+               0x3b4f0, 0x3b548,
+               0x3b560, 0x3b59c,
+               0x3b5f0, 0x3b6e4,
+               0x3b6f8, 0x3b7e4,
+               0x3b7f8, 0x3b7fc,
+               0x3b814, 0x3b814,
+               0x3b82c, 0x3b82c,
+               0x3b880, 0x3b88c,
+               0x3b8e8, 0x3b8ec,
+               0x3b900, 0x3b948,
+               0x3b960, 0x3b99c,
+               0x3b9f0, 0x3bae4,
+               0x3baf8, 0x3bb10,
+               0x3bb28, 0x3bb28,
+               0x3bb3c, 0x3bb50,
+               0x3bbf0, 0x3bc10,
+               0x3bc28, 0x3bc28,
+               0x3bc3c, 0x3bc50,
+               0x3bcf0, 0x3bcfc,
+               0x3c000, 0x3c030,
+               0x3c100, 0x3c144,
+               0x3c190, 0x3c1d0,
+               0x3c200, 0x3c318,
+               0x3c400, 0x3c52c,
+               0x3c540, 0x3c61c,
+               0x3c800, 0x3c834,
+               0x3c8c0, 0x3c908,
+               0x3c910, 0x3c9ac,
+               0x3ca00, 0x3ca04,
+               0x3ca0c, 0x3ca2c,
+               0x3ca44, 0x3ca50,
+               0x3ca74, 0x3cc24,
+               0x3cd08, 0x3cd14,
+               0x3cd1c, 0x3cd20,
+               0x3cd3c, 0x3cd50,
+               0x3d200, 0x3d20c,
+               0x3d220, 0x3d220,
+               0x3d240, 0x3d240,
+               0x3d600, 0x3d600,
+               0x3d608, 0x3d60c,
+               0x3da00, 0x3da1c,
+               0x3de04, 0x3de20,
+               0x3de38, 0x3de3c,
+               0x3de80, 0x3de80,
+               0x3de88, 0x3dea8,
+               0x3deb0, 0x3deb4,
+               0x3dec8, 0x3ded4,
+               0x3dfb8, 0x3e004,
+               0x3e208, 0x3e23c,
+               0x3e600, 0x3e630,
+               0x3ea00, 0x3eabc,
+               0x3eb00, 0x3eb70,
+               0x3f000, 0x3f048,
+               0x3f060, 0x3f09c,
+               0x3f0f0, 0x3f148,
+               0x3f160, 0x3f19c,
+               0x3f1f0, 0x3f2e4,
+               0x3f2f8, 0x3f3e4,
+               0x3f3f8, 0x3f448,
+               0x3f460, 0x3f49c,
+               0x3f4f0, 0x3f548,
+               0x3f560, 0x3f59c,
+               0x3f5f0, 0x3f6e4,
+               0x3f6f8, 0x3f7e4,
+               0x3f7f8, 0x3f7fc,
+               0x3f814, 0x3f814,
+               0x3f82c, 0x3f82c,
+               0x3f880, 0x3f88c,
+               0x3f8e8, 0x3f8ec,
+               0x3f900, 0x3f948,
+               0x3f960, 0x3f99c,
+               0x3f9f0, 0x3fae4,
+               0x3faf8, 0x3fb10,
+               0x3fb28, 0x3fb28,
+               0x3fb3c, 0x3fb50,
+               0x3fbf0, 0x3fc10,
+               0x3fc28, 0x3fc28,
+               0x3fc3c, 0x3fc50,
+               0x3fcf0, 0x3fcfc,
+               0x40000, 0x4000c,
+               0x40040, 0x40068,
+               0x40080, 0x40144,
+               0x40180, 0x4018c,
+               0x40200, 0x40298,
+               0x402ac, 0x4033c,
+               0x403f8, 0x403fc,
+               0x41304, 0x413c4,
+               0x41400, 0x4141c,
+               0x41480, 0x414d0,
+               0x44000, 0x44078,
+               0x440c0, 0x44278,
+               0x442c0, 0x44478,
+               0x444c0, 0x44678,
+               0x446c0, 0x44878,
+               0x448c0, 0x449fc,
+               0x45000, 0x45068,
+               0x45080, 0x45084,
+               0x450a0, 0x450b0,
+               0x45200, 0x45268,
+               0x45280, 0x45284,
+               0x452a0, 0x452b0,
+               0x460c0, 0x460e4,
+               0x47000, 0x4708c,
+               0x47200, 0x47250,
+               0x47400, 0x47420,
+               0x47600, 0x47618,
+               0x47800, 0x47814,
+               0x48000, 0x4800c,
+               0x48040, 0x48068,
+               0x48080, 0x48144,
+               0x48180, 0x4818c,
+               0x48200, 0x48298,
+               0x482ac, 0x4833c,
+               0x483f8, 0x483fc,
+               0x49304, 0x493c4,
+               0x49400, 0x4941c,
+               0x49480, 0x494d0,
+               0x4c000, 0x4c078,
+               0x4c0c0, 0x4c278,
+               0x4c2c0, 0x4c478,
+               0x4c4c0, 0x4c678,
+               0x4c6c0, 0x4c878,
+               0x4c8c0, 0x4c9fc,
+               0x4d000, 0x4d068,
+               0x4d080, 0x4d084,
+               0x4d0a0, 0x4d0b0,
+               0x4d200, 0x4d268,
+               0x4d280, 0x4d284,
+               0x4d2a0, 0x4d2b0,
+               0x4e0c0, 0x4e0e4,
+               0x4f000, 0x4f08c,
+               0x4f200, 0x4f250,
+               0x4f400, 0x4f420,
+               0x4f600, 0x4f618,
+               0x4f800, 0x4f814,
+               0x50000, 0x500cc,
+               0x50400, 0x50400,
+               0x50800, 0x508cc,
+               0x50c00, 0x50c00,
+               0x51000, 0x5101c,
+               0x51300, 0x51308,
+       };
+
+       u32 *buf_end = (u32 *)((char *)buf + buf_size);
+       const unsigned int *reg_ranges;
+       int reg_ranges_size, range;
+       unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+       /* Select the right set of register ranges to dump depending on the
+        * adapter chip type.
+        */
+       switch (chip_version) {
+       case CHELSIO_T4:
+               reg_ranges = t4_reg_ranges;
+               reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
+               break;
+
+       case CHELSIO_T5:
+               reg_ranges = t5_reg_ranges;
+               reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+               break;
+
+       default:
+               dev_err(adap->pdev_dev,
+                       "Unsupported chip version %d\n", chip_version);
+               return;
+       }
+
+       /* Clear the register buffer and insert the appropriate register
+        * values selected by the above register ranges.
+        */
+       memset(buf, 0, buf_size);
+       for (range = 0; range < reg_ranges_size; range += 2) {
+               unsigned int reg = reg_ranges[range];
+               unsigned int last_reg = reg_ranges[range + 1];
+               u32 *bufp = (u32 *)((char *)buf + reg);
+
+               /* Iterate across the register range filling in the register
+                * buffer but don't write past the end of the register buffer.
+                */
+               while (reg <= last_reg && bufp < buf_end) {
+                       *bufp++ = t4_read_reg(adap, reg);
+                       reg += sizeof(u32);
+               }
+       }
+}
+
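The dump loop above walks the range table as (start, last) register pairs and stores each value at the register's own byte offset inside the caller's buffer, clamping at the buffer end. The following stand-alone sketch shows the same pattern; read_reg() is a stub invented here purely so the example runs and is not the driver's t4_read_reg():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stub register read: every register reads back as its own address. */
static uint32_t read_reg(unsigned int addr)
{
        return addr;
}

/* Fill 'buf' with register values placed at their own offsets, walking
 * the (start, last) pairs in 'ranges' and never writing past the end.
 */
static void dump_ranges(void *buf, size_t buf_size,
                        const unsigned int *ranges, size_t nranges)
{
        uint32_t *buf_end = (uint32_t *)((char *)buf + buf_size);
        size_t i;

        memset(buf, 0, buf_size);
        for (i = 0; i < nranges; i += 2) {
                unsigned int reg = ranges[i];
                unsigned int last = ranges[i + 1];
                uint32_t *p = (uint32_t *)((char *)buf + reg);

                while (reg <= last && p < buf_end) {
                        *p++ = read_reg(reg);
                        reg += sizeof(uint32_t);
                }
        }
}

int main(void)
{
        static const unsigned int ranges[] = { 0x10, 0x1c, 0x40, 0x44 };
        uint32_t buf[32] = { 0 };

        dump_ranges(buf, sizeof(buf), ranges, 4);
        printf("buf[0x10/4] = 0x%x\n", buf[0x10 / 4]);   /* prints 0x10 */
        return 0;
}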
 #define EEPROM_STAT_ADDR   0x7bfc
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
@@ -4458,6 +5186,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
        return 0;
 }
 
+/**
+ *     t4_init_devlog_params - initialize adapter->params.devlog
+ *     @adap: the adapter
+ *
+ *     Initialize various fields of the adapter's Firmware Device Log
+ *     Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+       struct devlog_params *dparams = &adap->params.devlog;
+       u32 pf_dparams;
+       unsigned int devlog_meminfo;
+       struct fw_devlog_cmd devlog_cmd;
+       int ret;
+
+       /* If we're dealing with newer firmware, the Device Log Parameters
+        * are stored in a designated register which allows us to access the
+        * Device Log even if we can't talk to the firmware.
+        */
+       pf_dparams =
+               t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+       if (pf_dparams) {
+               unsigned int nentries, nentries128;
+
+               dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+               dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+               nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+               nentries = (nentries128 + 1) * 128;
+               dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+               return 0;
+       }
+
+       /* Otherwise, ask the firmware for its Device Log Parameters.
+        */
+       memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
+       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+                        &devlog_cmd);
+       if (ret)
+               return ret;
+
+       devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+       dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+       dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+       dparams->size = ntohl(devlog_cmd.memsize_devlog);
+
+       return 0;
+}
+
 /**
  *     t4_init_sge_params - initialize adap->params.sge
  *     @adapter: the adapter
index 231a725f6d5d1679c4d6b07ff6694ac387f0b66a..326674b19983825af5631993b4427e0132ed6ba6 100644 (file)
@@ -63,6 +63,8 @@
 #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
 #define SGE_PF_KDOORBELL_A 0x0
 
 #define QID_S    15
 #define PFNUM_V(x) ((x) << PFNUM_S)
 
 #define PCIE_FW_A 0x30b8
+#define PCIE_FW_PF_A 0x30bc
 
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
 
index d136ca6a0c8a1544dbdf938214bb5cedbdc2113f..03fbfd1fb3dff35f5cef20f84a574df09e285116 100644 (file)
@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
        FW_RI_BIND_MW_WR               = 0x18,
        FW_RI_FR_NSMR_WR               = 0x19,
        FW_RI_INV_LSTAG_WR             = 0x1a,
-       FW_LASTC2E_WR                  = 0x40
+       FW_LASTC2E_WR                  = 0x70
 };
 
 struct fw_wr_hdr {
@@ -993,6 +993,7 @@ enum fw_memtype_cf {
        FW_MEMTYPE_CF_EXTMEM            = 0x2,
        FW_MEMTYPE_CF_FLASH             = 0x4,
        FW_MEMTYPE_CF_INTERNAL          = 0x5,
+       FW_MEMTYPE_CF_EXTMEM1           = 0x6,
 };
 
 struct fw_caps_config_cmd {
@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
        FW_PARAMS_MNEM_PFVF             = 2,    /* function params */
        FW_PARAMS_MNEM_REG              = 3,    /* limited register access */
        FW_PARAMS_MNEM_DMAQ             = 4,    /* dma queue params */
+       FW_PARAMS_MNEM_CHNET            = 5,    /* chnet params */
        FW_PARAMS_MNEM_LAST
 };
 
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
        FW_DEVLOG_FACILITY_FCOE         = 0x2E,
        FW_DEVLOG_FACILITY_FOISCSI      = 0x30,
        FW_DEVLOG_FACILITY_FOFCOE       = 0x32,
-       FW_DEVLOG_FACILITY_MAX          = 0x32,
+       FW_DEVLOG_FACILITY_CHNET        = 0x34,
+       FW_DEVLOG_FACILITY_MAX          = 0x34,
 };
 
 /* log message format */
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
        (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
         FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
 
+/* P C I E   F W   P F 7   R E G I S T E R */
+
+/* PF7 stores the Firmware Device Log parameters which allow Host Drivers to
+ * access the "devlog" without needing to contact firmware.  The encoding is
+ * mostly the same as that returned by the DEVLOG command except for the size
+ * which is encoded as the number of entries in multiples-1 of 128 here rather
+ * than the memory size as is done in the DEVLOG command.  Thus, 0 means 128
+ * and 15 means 2048.  This of course in turn constrains the allowed values
+ * for the devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG              7
+
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_S        28
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_M        0xf
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
+       ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
+       (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
+        PCIE_FW_PF_DEVLOG_NENTRIES128_M)
+
+#define PCIE_FW_PF_DEVLOG_ADDR16_S     4
+#define PCIE_FW_PF_DEVLOG_ADDR16_M     0xffffff
+#define PCIE_FW_PF_DEVLOG_ADDR16_V(x)  ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
+#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
+       (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
+
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_S    0
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_M    0xf
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
+       (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+
 #endif /* _T4FW_INTERFACE_H_ */
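To make the PCIE_FW_PF_DEVLOG encoding above concrete, the following stand-alone sketch packs and unpacks a value with the same shift/mask scheme. The sample register value is hypothetical and the macro names are local to the example:

#include <stdint.h>
#include <stdio.h>

#define NENTRIES128_S 28
#define NENTRIES128_M 0xfu
#define ADDR16_S      4
#define ADDR16_M      0xffffffu
#define MEMTYPE_S     0
#define MEMTYPE_M     0xfu

int main(void)
{
        /* Hypothetical value: memtype 2, addr16 0x1000, nentries128 15. */
        uint32_t reg = (15u << NENTRIES128_S) | (0x1000u << ADDR16_S) | 2u;

        unsigned int memtype  = (reg >> MEMTYPE_S) & MEMTYPE_M;
        unsigned int start    = ((reg >> ADDR16_S) & ADDR16_M) << 4;
        unsigned int nentries = (((reg >> NENTRIES128_S) & NENTRIES128_M) + 1) * 128;

        /* memtype=2 start=0x10000 nentries=2048: field value 15 -> 2048 entries. */
        printf("memtype=%u start=0x%x nentries=%u\n", memtype, start, nentries);
        return 0;
}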
index e2bd3f74785851d0db24c282f5a497972bcaf20f..b9d1cbac0eee3c97e76cff147df732601b83f714 100644 (file)
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_MINOR 0x0D
+#define T4FW_VERSION_MICRO 0x20
 #define T4FW_VERSION_BUILD 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_MINOR 0x0D
+#define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
 #endif
index 5ba14b32c3700ff59ce518eca54173ded698205a..482f6de6817d47e77e0b3ef648d0239554ef9561 100644 (file)
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                                              ? (tq->pidx - 1)
                                              : (tq->size - 1));
                        __be64 *src = (__be64 *)&tq->desc[index];
-                       __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
+                       __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
                                                         SGE_UDB_WCDOORBELL);
                        unsigned int count = EQ_UNIT / sizeof(__be64);
 
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                         * DMA.
                         */
                        while (count) {
-                               writeq(*src, dst);
+                               /* the (__force u64) is because the compiler
+                                * doesn't understand the endian swizzling
+                                * going on
+                                */
+                               writeq((__force u64)*src, dst);
                                src++;
                                dst++;
                                count--;
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
        wr = (void *)&txq->q.desc[txq->q.pidx];
        wr->equiq_to_len16 = cpu_to_be32(wr_mid);
-       wr->r3[0] = cpu_to_be64(0);
-       wr->r3[1] = cpu_to_be64(0);
+       wr->r3[0] = cpu_to_be32(0);
+       wr->r3[1] = cpu_to_be32(0);
        skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
        end = (u64 *)wr + flits;
 
@@ -1747,7 +1751,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                 * Figure out what kind of response we've received from the
                 * SGE.
                 */
-               rmb();
+               dma_rmb();
                rsp_type = RSPD_TYPE(rc->type_gen);
                if (likely(rsp_type == RSP_TYPE_FLBUF)) {
                        struct page_frag *fp;
@@ -1931,7 +1935,7 @@ static unsigned int process_intrq(struct adapter *adapter)
                 * error and go on to the next response message.  This should
                 * never happen ...
                 */
-               rmb();
+               dma_rmb();
                if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
                        dev_err(adapter->pdev_dev,
                                "Unexpected INTRQ response type %d\n",
index c21e2e954ad8b84437e5643e59be58693d9e5f2f..966ee900ed00bdad79d24fdd6653665230272e87 100644 (file)
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 
                        if (rpl) {
                                /* request bit in high-order BE word */
-                               WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+                               WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
                                         & FW_CMD_REQUEST_F) == 0);
                                get_mbox_rpl(adapter, rpl, size, mbox_data);
-                               WARN_ON((be32_to_cpu(*(u32 *)rpl)
+                               WARN_ON((be32_to_cpu(*(__be32 *)rpl)
                                         & FW_CMD_REQUEST_F) != 0);
                        }
                        t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
         *  o The BAR2 Queue ID.
         *  o The BAR2 Queue ID Offset into the BAR2 page.
         */
-       bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+       bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
        bar2_qid = qid & qpp_mask;
        bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
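The (u64) cast added above matters because (qid >> qpp_shift) is a 32-bit value; shifting it by a large page_shift would wrap before the result is widened to u64. A minimal user-space sketch of the difference, with hypothetical qid and shift values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int qid = 0x20000;     /* hypothetical queue id       */
        unsigned int qpp_shift = 0;     /* one queue per BAR2 page     */
        unsigned int page_shift = 16;   /* 64 KB BAR2 pages            */

        /* Shifting in 32 bits first wraps the value ...               */
        uint32_t truncated = (qid >> qpp_shift) << page_shift;
        /* ... widening to 64 bits before the shift keeps it intact.   */
        uint64_t widened = (uint64_t)(qid >> qpp_shift) << page_shift;

        printf("truncated: 0x%x\n", truncated);                     /* 0x0         */
        printf("widened:   0x%llx\n", (unsigned long long)widened); /* 0x200000000 */
        return 0;
}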
 
index 78e1ce09b1ab1deadad177472593e020a8f9d3c2..f6a3a7abd468e1f25fd4c33e874a38f0c85bc4dd 100644 (file)
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct device_node *node;
        int err = -ENXIO, i;
+       u32 mii_speed, holdtime;
 
        /*
         * The i.MX28 dual fec interfaces are not equal.
@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * Reference Manual has an error on this, and gets fixed on i.MX6Q
         * document.
         */
-       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+       mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
        if (fep->quirks & FEC_QUIRK_ENET_MAC)
-               fep->phy_speed--;
-       fep->phy_speed <<= 1;
+               mii_speed--;
+       if (mii_speed > 63) {
+               dev_err(&pdev->dev,
+                       "fec clock (%lu) to fast to get right mii speed\n",
+                       clk_get_rate(fep->clk_ipg));
+               err = -EINVAL;
+               goto err_out;
+       }
+
+       /*
+        * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+        * MII_SPEED) register that defines the MDIO output hold time. Earlier
+        * versions are RAZ there, so just ignore the difference and write the
+        * register always.
+        * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+        * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+        * output.
+        * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+        * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+        * holdtime cannot result in a value greater than 3.
+        */
+       holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+       fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
        fep->mii_bus = mdiobus_alloc();
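As a worked example of the calculation above (assuming a hypothetical 66 MHz clk_ipg on a part with the ENET-MAC quirk): mii_speed = ceil(66 MHz / 5 MHz) - 1 = 13, which fits the 6-bit field, and holdtime = ceil(66 MHz / 100 MHz) - 1 = 0, giving MSCR = 13 << 1 | 0 << 8 = 0x1a. The same arithmetic as a small runnable sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long clk = 66000000UL;   /* hypothetical clk_ipg rate */
        unsigned int mii_speed, holdtime, phy_speed;

        mii_speed = DIV_ROUND_UP(clk, 5000000) - 1;      /* ENET-MAC quirk  */
        holdtime  = DIV_ROUND_UP(clk, 100000000) - 1;    /* >= 10 ns hold   */
        phy_speed = mii_speed << 1 | holdtime << 8;

        printf("mii_speed=%u holdtime=%u MSCR=0x%x\n",
               mii_speed, holdtime, phy_speed);          /* 13 0 0x1a */
        return 0;
}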
index bfdccbd58be0855e1f3f26d71ed4871ed1b4e9aa..4dd40e057f40035ff6b8a1e236ab80c8ddc5497b 100644 (file)
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        ugeth->phy_interface = phy_interface;
        ugeth->max_speed = max_speed;
 
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
        if (err) {
                if (netif_msg_probe(ugeth))
index 05f88394f9a5599dcc1a4076a05694e596e6fdbe..1a450f4b6b125d2234ea159022666b5ca381d328 100644 (file)
@@ -899,7 +899,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
        /* Order is important otherwise we'll be in a race with h/w:
         * set S-bit in current first, then clear S-bit in previous. */
        cb->command |= cpu_to_le16(cb_s);
-       wmb();
+       dma_wmb();
        cb->prev->command &= cpu_to_le16(~cb_s);
 
        while (nic->cb_to_send != nic->cb_to_use) {
@@ -1843,7 +1843,7 @@ static int e100_tx_clean(struct nic *nic)
        for (cb = nic->cb_to_clean;
            cb->status & cpu_to_le16(cb_complete);
            cb = nic->cb_to_clean = cb->next) {
-               rmb(); /* read skb after status */
+               dma_rmb(); /* read skb after status */
                netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
                             "cb[%d]->status = 0x%04X\n",
                             (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
@@ -1993,7 +1993,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
        netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
                     "status=0x%04X\n", rfd_status);
-       rmb(); /* read size after status bit */
+       dma_rmb(); /* read size after status bit */
 
        /* If data isn't ready, nothing to indicate */
        if (unlikely(!(rfd_status & cb_complete))) {
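The rmb() -> dma_rmb() conversions in this series keep the same descriptor-polling pattern: read the status/DD word first, then order that read before the reads of the fields the device filled in alongside it. Since dma_rmb() only has to order reads from coherent DMA memory, it is typically no heavier, and on several architectures cheaper, than a full rmb(). A user-space model of the pattern, with an acquire fence standing in for the kernel barrier purely so the sketch compiles:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's dma_rmb(), only so this sketch compiles. */
#define dma_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

struct rx_desc {
        volatile uint16_t status;       /* device sets the DD bit when done */
        uint16_t length;
        uint64_t addr;
};

#define DD_BIT 0x1

/* Descriptor-polling pattern: check the status word, then order that
 * read before reading the other fields the device wrote with it.
 */
static int poll_one(const struct rx_desc *desc)
{
        if (!(desc->status & DD_BIT))
                return 0;               /* descriptor not ready yet */

        dma_rmb();                      /* read length/addr only after DD */

        printf("len=%u addr=0x%llx\n", (unsigned)desc->length,
               (unsigned long long)desc->addr);
        return 1;
}

int main(void)
{
        struct rx_desc d = { .status = DD_BIT, .length = 64, .addr = 0x1000 };

        return poll_one(&d) ? 0 : 1;
}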
index b548ef0cf56be1278daa2d92edcde4fed51f9541..983eb4e6f7aa184deb48f526316c3254622c6756 100644 (file)
@@ -3856,7 +3856,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                bool cleaned = false;
-               rmb();  /* read buffer_info after eop_desc */
+               dma_rmb();      /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
@@ -4154,7 +4154,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
-               rmb(); /* read descriptor and rx_buffer_info after status DD */
+               dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
                status = rx_desc->status;
 
@@ -4375,7 +4375,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
-               rmb(); /* read descriptor and rx_buffer_info after status DD */
+               dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
                status = rx_desc->status;
                length = le16_to_cpu(rx_desc->length);
index 4e56c31959897402bf581131af693be5375311e1..74ec185a697facce09174abda046db2b08495203 100644 (file)
@@ -947,7 +947,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
-               rmb();  /* read descriptor and rx_buffer_info after status DD */
+               dma_rmb();      /* read descriptor and rx_buffer_info after status DD */
 
                skb = buffer_info->skb;
                buffer_info->skb = NULL;
@@ -1232,7 +1232,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
               (count < tx_ring->count)) {
                bool cleaned = false;
 
-               rmb();          /* read buffer_info after eop_desc */
+               dma_rmb();              /* read buffer_info after eop_desc */
                for (; !cleaned; count++) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
@@ -1332,7 +1332,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
                        break;
                (*work_done)++;
                skb = buffer_info->skb;
-               rmb();  /* read descriptor and rx_buffer_info after status DD */
+               dma_rmb();      /* read descriptor and rx_buffer_info after status DD */
 
                /* in the packet split case this is header only */
                prefetch(skb->data - NET_IP_ALIGN);
@@ -1536,7 +1536,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
-               rmb();  /* read descriptor and rx_buffer_info after status DD */
+               dma_rmb();      /* read descriptor and rx_buffer_info after status DD */
 
                skb = buffer_info->skb;
                buffer_info->skb = NULL;
index 1c8bd7c152c2e15ae4f09accfeb37cf17d0a10c5..33c35d3b7420fa9ae545aea4ebd5160036914718 100644 (file)
@@ -628,6 +628,7 @@ extern const char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
 void i40e_update_stats(struct i40e_vsi *vsi);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
index 400fb28db576a134aebd94d3624e92e3786c2996..bd5079d5c1b682016db7a166c11c9a0e9f392b38 100644 (file)
@@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return;
 
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
        dcbxcfg = &hw->local_dcbx_config;
 
        /* Set up all the App TLVs if DCBx is negotiated */
@@ -282,6 +286,10 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
        struct i40e_dcb_app_priority_table app;
        int i;
 
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
        for (i = 0; i < old_cfg->numapps; i++) {
                app = old_cfg->app[i];
                /* The APP is not available anymore delete it */
index 1ca48458e6683eb935c96ede4eb0e60dba05766a..1803afeef23ede81ed906b5400e9f3164234a4de 100644 (file)
@@ -1306,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
        /* MACLEN is ether header length in words not bytes */
        td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
-       return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                          td_cmd, td_offset);
+       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
 }
 
 /**
index 845bceeda6451da241f257d0eb323c59cbcfc91d..63de3f4b7a94e4f2b6ec5454028e5bc0f5e99140 100644 (file)
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 43
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 1
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -249,6 +249,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
        return count;
 }
 
+/**
+ * i40e_find_vsi_from_id - searches for the vsi with the given id
+ * @pf: the pf structure to search for the vsi
+ * @id: id of the vsi to search for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && (pf->vsi[i]->id == id))
+                       return pf->vsi[i];
+
+       return NULL;
+}
+
 /**
  * i40e_service_event_schedule - Schedule the service task to wake up
  * @pf: board private structure
@@ -1969,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
                                    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
@@ -1998,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
                                    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
@@ -2282,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
                                    I40E_AQ_VSI_PVLAN_EMOD_STR;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
@@ -3197,6 +3213,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
                dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+               dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+                        rd32(hw, I40E_PFHMC_ERRORINFO),
+                        rd32(hw, I40E_PFHMC_ERRORDATA));
        }
 
        if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
@@ -4392,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ctxt.pf_num = vsi->back->hw.pf_id;
        ctxt.vf_num = 0;
        ctxt.uplink_seid = vsi->uplink_seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
 
        /* Update the VSI after updating the VSI queue-mapping information */
@@ -5220,9 +5239,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
                goto exit;
        }
 
-       memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
        /* Store the old configuration */
-       memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg));
+       tmp_dcbx_cfg = hw->local_dcbx_config;
 
        /* Reset the old DCBx configuration data */
        memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
@@ -5782,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
        struct i40e_hw *hw = &pf->hw;
        struct i40e_aqc_get_link_status *status =
                (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 
        /* save off old link status information */
-       memcpy(&pf->hw.phy.link_info_old, hw_link_info,
-              sizeof(pf->hw.phy.link_info_old));
+       hw->phy.link_info_old = hw->phy.link_info;
 
        /* Do a new status request to re-enable LSE reporting
         * and load new status information into the hw struct
@@ -6608,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
-       u8 filter_index;
        __be16 port;
        int i;
 
@@ -6621,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
                if (pf->pending_vxlan_bitmap & (1 << i)) {
                        pf->pending_vxlan_bitmap &= ~(1 << i);
                        port = pf->vxlan_ports[i];
-                       ret = port ?
-                             i40e_aq_add_udp_tunnel(hw, ntohs(port),
+                       if (port)
+                               ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
                                                     I40E_AQC_TUNNEL_TYPE_VXLAN,
-                                                    &filter_index, NULL)
-                             : i40e_aq_del_udp_tunnel(hw, i, NULL);
+                                                    NULL, NULL);
+                       else
+                               ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
 
                        if (ret) {
-                               dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
-                                        port ? "adding" : "deleting",
-                                        ntohs(port), port ? i : i);
-
+                               dev_info(&pf->pdev->dev,
+                                        "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+                                        port ? "add" : "delete",
+                                        ntohs(port), i, ret,
+                                        pf->hw.aq.asq_last_status);
                                pf->vxlan_ports[i] = 0;
-                       } else {
-                               dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
-                                        port ? "Added" : "Deleted",
-                                        ntohs(port), port ? i : filter_index);
                        }
                }
        }
@@ -7829,7 +7842,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+               netdev_info(netdev, "vxlan port %d already offloaded\n",
+                           ntohs(port));
                return;
        }
 
@@ -7837,7 +7851,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        next_idx = i40e_get_vxlan_port_idx(pf, 0);
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+               netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
                            ntohs(port));
                return;
        }
@@ -7845,8 +7859,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[next_idx] = port;
        pf->pending_vxlan_bitmap |= (1 << next_idx);
-
        pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+       dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
 }
 
 /**
@@ -7874,12 +7889,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                 * and make it pending
                 */
                pf->vxlan_ports[idx] = 0;
-
                pf->pending_vxlan_bitmap |= (1 << idx);
-
                pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+               dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
+                        ntohs(port));
        } else {
-               netdev_warn(netdev, "Port %d was not found, not deleting\n",
+               netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
 }
@@ -8269,7 +8285,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                                 ret, pf->hw.aq.asq_last_status);
                        return -ENOENT;
                }
-               memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+               vsi->info = ctxt.info;
                vsi->info.valid_sections = 0;
 
                vsi->seid = ctxt.seid;
@@ -8403,7 +8419,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = -ENOENT;
                        goto err;
                }
-               memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+               vsi->info = ctxt.info;
                vsi->info.valid_sections = 0;
                vsi->seid = ctxt.seid;
                vsi->id = ctxt.vsi_number;
@@ -10210,6 +10226,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);
+       i40e_fdir_teardown(pf);
+
        rtnl_lock();
        i40e_prep_for_reset(pf);
        rtnl_unlock();
index 9b11f2e7e3610044e6a1e17f951eb1718038a9b7..4bd3a80aba82998bba343a1870b2d21f59bca4e0 100644 (file)
@@ -1554,7 +1554,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
-               rmb();
+               dma_rmb();
                if (i40e_rx_is_programming_status(qword)) {
                        i40e_clean_programming_status(rx_ring, rx_desc);
                        I40E_RX_INCREMENT(rx_ring, i);
@@ -1565,8 +1565,11 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                if (likely(!skb)) {
                        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                        rx_ring->rx_hdr_len);
-                       if (!skb)
+                       if (!skb) {
                                rx_ring->rx_stats.alloc_buff_failed++;
+                               break;
+                       }
+
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
                        /* we are reusing so sync this buffer for CPU use */
@@ -1742,7 +1745,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
-               rmb();
+               dma_rmb();
 
                if (i40e_rx_is_programming_status(qword)) {
                        i40e_clean_programming_status(rx_ring, rx_desc);
@@ -2054,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
 
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+               /* When HW VLAN acceleration is turned off by the user the
+                * stack sets the protocol to 8021q so that the driver
+                * can take any steps required to support the SW only
+                * VLAN handling.  In our case the driver doesn't need
+                * to take any further steps so just set the protocol
+                * to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               goto out;
+       }
+
        /* if we have a HW VLAN tag being added, default to the HW one */
        if (skb_vlan_tag_present(skb)) {
                tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
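For the software-VLAN fallback above: when hardware tag insertion is off, the stack hands down frames whose skb->protocol is ETH_P_8021Q, with the real ethertype sitting after the 4-byte VLAN tag, which is what vlan_get_protocol() recovers. A user-space model of that lookup (not the kernel helper itself), using a hypothetical frame:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100

static uint16_t load_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

/* If the outer ethertype is 802.1Q, the encapsulated protocol sits
 * after the 4-byte VLAN tag (TPID + TCI).
 */
static uint16_t encapsulated_proto(const uint8_t *frame)
{
        uint16_t outer = load_be16(frame + 12);

        return outer == ETH_P_8021Q ? load_be16(frame + 16) : outer;
}

int main(void)
{
        /* Hypothetical frame: 12 bytes of MAC addresses, TPID 0x8100,
         * TCI 0x0064 (VLAN 100), encapsulated ethertype 0x0800 (IPv4).
         */
        uint8_t frame[18] = { [12] = 0x81, [13] = 0x00,
                              [14] = 0x00, [15] = 0x64,
                              [16] = 0x08, [17] = 0x00 };

        printf("0x%04x\n", encapsulated_proto(frame));  /* prints 0x0800 */
        return 0;
}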
index 0a93684130b94e3384ea6fcf986295ce68c971d8..4d69e1f04901553d4efe7a9e5357c1c8229235c8 100644 (file)
@@ -53,11 +53,12 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
  *
  * check for the valid VSI id
  **/
-static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
-       return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+       return (vsi && (vsi->vf_id == vf->vf_id));
 }
 
 /**
@@ -68,12 +69,13 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
  *
  * check for the valid queue id
  **/
-static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                            u8 qid)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
-       return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
+       return (vsi && (qid < vsi->alloc_queue_pairs));
 }
 
 /**
@@ -95,18 +97,21 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 /**
  * i40e_vc_get_pf_queue_id
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue id
  *
  * return PF relative queue id
  **/
-static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                   u8 vsi_queue_id)
 {
        struct i40e_pf *pf = vf->pf;
-       struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 
+       if (!vsi)
+               return pf_queue_id;
+
        if (le16_to_cpu(vsi->info.mapping_flags) &
            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
                pf_queue_id =
@@ -121,12 +126,12 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
 /**
  * i40e_config_irq_link_list
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as given by the FW
  * @vecmap: irq map info
  *
  * configure irq link list from the map
  **/
-static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                                      struct i40e_virtchnl_vector_map *vecmap)
 {
        unsigned long linklistmap = 0, tempmap;
@@ -171,7 +176,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
        vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
        qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 
        wr32(hw, reg_idx, reg);
@@ -198,7 +203,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
-                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
                                                              vsi_queue_id);
                } else {
                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
@@ -221,24 +226,26 @@ irq_list_done:
 /**
  * i40e_config_vsi_tx_queue
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue index
  * @info: config. info
  *
  * configure tx queue
  **/
-static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_txq_info *info)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_txq tx_ctx;
+       struct i40e_vsi *vsi;
        u16 pf_queue_id;
        u32 qtx_ctl;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+       vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
        /* clear the context structure first */
        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -246,7 +253,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
        /* only set the required fields */
        tx_ctx.base = info->dma_ring_addr / 128;
        tx_ctx.qlen = info->ring_len;
-       tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+       tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
        tx_ctx.rdylist_act = 0;
        tx_ctx.head_wb_ena = info->headwb_enabled;
        tx_ctx.head_wb_addr = info->dma_headwb_addr;
@@ -288,13 +295,13 @@ error_context:
 /**
  * i40e_config_vsi_rx_queue
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue index
  * @info: config. info
  *
  * configure rx queue
  **/
-static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_rxq_info *info)
 {
@@ -304,7 +311,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
        u16 pf_queue_id;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -401,7 +408,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
        }
        if (type == I40E_VSI_SRIOV) {
                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-               vf->lan_vsi_index = vsi->idx;
+               vf->lan_vsi_idx = vsi->idx;
                vf->lan_vsi_id = vsi->id;
                /* If the port VLAN has been configured and then the
                 * VF driver was removed then the VSI port VLAN
@@ -466,8 +473,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 
        /* map PF queues to VF queues */
-       for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
-               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+       for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
+               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
                total_queue_pairs++;
@@ -475,13 +482,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 
        /* map PF queues to VSI */
        for (j = 0; j < 7; j++) {
-               if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
+               if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
                        reg = 0x07FF07FF;       /* unused */
                } else {
-                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
                                                          j * 2);
                        reg = qid;
-                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
                                                      (j * 2) + 1);
                        reg |= qid << 16;
                }
@@ -525,9 +532,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
        int i, msix_vf;
 
        /* free vsi & disconnect it from the parent uplink */
-       if (vf->lan_vsi_index) {
-               i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
-               vf->lan_vsi_index = 0;
+       if (vf->lan_vsi_idx) {
+               i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+               vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
        }
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -582,7 +589,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
        if (ret)
                goto error_alloc;
-       total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+       total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
        /* store the total qps number for the runtime
@@ -692,10 +699,10 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 
        /* On initial reset, we won't have any queues */
-       if (vf->lan_vsi_index == 0)
+       if (vf->lan_vsi_idx == 0)
                goto complete_reset;
 
-       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
 complete_reset:
        /* reallocate VF resources to reset the VSI state */
        i40e_free_vf_res(vf);
@@ -732,6 +739,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
         */
        if (!pci_vfs_assigned(pf->pdev))
                pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
 
        msleep(20); /* let any messages in transit get finished up */
 
@@ -761,9 +770,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
                }
-       } else {
-               dev_warn(&pf->pdev->dev,
-                        "unable to disable SR-IOV because VFs are assigned.\n");
        }
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
@@ -1017,18 +1023,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
        }
 
        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
-       if (vf->lan_vsi_index) {
-               vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+       if (vf->lan_vsi_idx) {
+               vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
                vfres->vsi_res[i].num_queue_pairs =
-                   pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+                   pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
                memcpy(vfres->vsi_res[i].default_mac_addr,
                       vf->default_lan_addr.addr, ETH_ALEN);
                i++;
@@ -1080,14 +1086,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
        bool allmulti = false;
        i40e_status aq_ret;
 
+       vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-           (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+           (vsi->type != I40E_VSI_FCOE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       vsi = pf->vsi[info->vsi_id];
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
@@ -1149,7 +1155,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                }
        }
        /* set vsi num_queue_pairs in use to num configured by VF */
-       pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
+       pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
 
 error_param:
        /* send the response to the VF */
@@ -1250,7 +1256,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
                aq_ret = I40E_ERR_TIMEOUT;
 error_param:
        /* send the response to the VF */
@@ -1272,7 +1279,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
        struct i40e_pf *pf = vf->pf;
-       u16 vsi_id = vqs->vsi_id;
        i40e_status aq_ret = 0;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
@@ -1289,7 +1295,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
                aq_ret = I40E_ERR_TIMEOUT;
 
 error_param:
@@ -1327,7 +1334,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                goto error_param;
        }
 
-       vsi = pf->vsi[vqs->vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1405,7 +1412,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                if (ret)
                        goto error_param;
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
        /* add new addresses to the list */
        for (i = 0; i < al->num_elements; i++) {
@@ -1473,7 +1480,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
@@ -1523,7 +1530,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1576,7 +1583,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                }
        }
 
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1965,7 +1972,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev,
                        "Uninitialized VF %d\n", vf_id);
@@ -2039,7 +2046,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
@@ -2152,7 +2159,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
                ret = -EINVAL;
@@ -2226,7 +2233,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
        vf = &(pf->vf[vf_id]);
        /* first vsi is always the LAN vsi */
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
@@ -2350,7 +2357,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 
        vf->spoofchk = enable;
        memset(&ctxt, 0, sizeof(ctxt));
-       ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+       ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        if (enable)
index 9c3a410408353e0bf536ba2e653e502f54cfb2d1..09043c1aae5435109fcd2bab8b4059c52c105961 100644 (file)
@@ -88,7 +88,7 @@ struct i40e_vf {
         * When assigned, these will be non-zero, because VSI 0 is always
         * the main LAN VSI for the PF.
         */
-       u8 lan_vsi_index;       /* index into PF struct */
+       u8 lan_vsi_idx;         /* index into PF struct */
        u8 lan_vsi_id;          /* ID as used by firmware */
 
        u8 num_queue_pairs;     /* num of qps assigned to VF vsis */
index f41da5d8047bd84da79a295ae1543f0703fc658a..b077e02a0cc7ac8f67ad90560cf990f8f7a66277 100644 (file)
@@ -915,9 +915,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
          * The UDP_0 bit *may* be set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -927,15 +925,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                          skb->protocol == htons(ETH_P_8021AD))
                                          ? VLAN_HLEN : 0;
 
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                (skb->len -
+                                                 skb_transport_offset(skb)),
+                                                IPPROTO_UDP, rx_udp_csum);
 
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
+
+               } /* else it's GRE and so no outer UDP header */
        }
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
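
A hedged aside on the hunk above: the driver recomputes the outer UDP checksum with udp_csum()/csum_tcpudp_magic() and compares it with the value carried in the header, skipping the check when the outer header is not UDP or its checksum is zero. A stand-alone sketch of that logic (the helper name is illustrative; the kernel helpers are the ones used above):

/* Sketch only: validate the outer UDP checksum of an IPv4 tunnel,
 * mirroring the in-driver logic above.
 */
static bool outer_udp_csum_ok(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	__wsum partial;
	__sum16 csum;

	if (iph->protocol != IPPROTO_UDP || udp_hdr(skb)->check == 0)
		return true;	/* no outer UDP header, or checksum disabled */

	partial = udp_csum(skb);	/* sum over UDP header + payload */
	csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
				 skb->len - skb_transport_offset(skb),
				 IPPROTO_UDP, partial);

	return udp_hdr(skb)->check == csum;
}
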
@@ -1032,14 +1034,17 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
-               rmb();
+               dma_rmb();
                rx_bi = &rx_ring->rx_bi[i];
                skb = rx_bi->skb;
                if (likely(!skb)) {
                        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                        rx_ring->rx_hdr_len);
-                       if (!skb)
+                       if (!skb) {
                                rx_ring->rx_stats.alloc_buff_failed++;
+                               break;
+                       }
+
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
                        /* we are reusing so sync this buffer for CPU use */
@@ -1208,7 +1213,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
-               rmb();
+               dma_rmb();
 
                rx_bi = &rx_ring->rx_bi[i];
                skb = rx_bi->skb;
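
Both hunks above swap rmb() for the lighter dma_rmb() at the same point in the receive loop: after the DD bit has been seen set and before any other descriptor field is read. A hedged sketch of that ordering (the descriptor field names follow the i40e layout, but the helper itself is hypothetical):

/* Check descriptor completion and order subsequent field reads. */
static bool i40e_rx_desc_done(union i40e_rx_desc *rx_desc)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

	if (!(qword & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
		return false;		/* not written back yet */

	/* dma_rmb() is enough here: the descriptor ring lives in coherent
	 * DMA memory, so a full rmb() (which also orders MMIO) is not needed.
	 */
	dma_rmb();
	return true;
}
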
@@ -1365,6 +1370,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
 
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+               /* When HW VLAN acceleration is turned off by the user the
+                * stack sets the protocol to 8021q so that the driver
+                * can take any steps required to support the SW only
+                * VLAN handling.  In our case the driver doesn't need
+                * to take any further steps so just set the protocol
+                * to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               goto out;
+       }
+
        /* if we have a HW VLAN tag being added, default to the HW one */
        if (skb_vlan_tag_present(skb)) {
                tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1381,6 +1399,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                tx_flags |= I40E_TX_FLAGS_SW_VLAN;
        }
 
+out:
        *flags = tx_flags;
        return 0;
 }
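
A small worked example of the flag packing above (values made up; the shift and flag names follow the driver's I40E_TX_FLAGS_* set): a hardware-offloaded tag with priority 5 and VLAN ID 100 ends up in tx_flags roughly as follows.

/* Illustrative only */
u32 tx_flags = 0;
u16 vlan_tci = (5 << VLAN_PRIO_SHIFT) | 100;	/* PCP 5, VID 100 */

tx_flags |= vlan_tci << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;		/* HW acceleration path */
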
index b08efafee1aee4c763fa111289ce4fe1736b5039..6d5f3b21c68a9939fa33f1c8ea7bd59948c22741 100644 (file)
@@ -664,13 +664,21 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
 static struct
 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
-       struct i40evf_vlan_filter *f;
+       struct i40evf_vlan_filter *f = NULL;
+       int count = 50;
+
+       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+                               &adapter->crit_section)) {
+               udelay(1);
+               if (--count == 0)
+                       goto out;
+       }
 
        f = i40evf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
-                       return NULL;
+                       goto clearout;
 
                f->vlan = vlan;
 
@@ -680,6 +688,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
                adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        }
 
+clearout:
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+out:
        return f;
 }
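
The bounded busy-wait introduced above (and repeated in i40evf_del_vlan below) is worth noting: the code spins on test_and_set_bit() for at most ~50 µs and then gives up instead of sleeping, presumably because these helpers can be reached from contexts that must not sleep. A generic hedged sketch of the pattern (the wrapper name is illustrative):

/* Try to take the driver's critical-section bit without sleeping. */
static bool i40evf_try_crit_section(unsigned long *crit_section)
{
	int count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, crit_section)) {
		udelay(1);			/* busy-wait, never sleep */
		if (--count == 0)
			return false;		/* give up after ~50 us */
	}
	return true;
}
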
 
@@ -691,12 +702,21 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
        struct i40evf_vlan_filter *f;
+       int count = 50;
+
+       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+                               &adapter->crit_section)) {
+               udelay(1);
+               if (--count == 0)
+                       return;
+       }
 
        f = i40evf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
        }
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
 /**
@@ -1415,41 +1435,22 @@ restart_watchdog:
 }
 
 /**
- * next_queue - increment to next available tx queue
- * @adapter: board private structure
- * @j: queue counter
- *
- * Helper function for RSS programming to increment through available
- * queus. Returns the next queue value.
- **/
-static int next_queue(struct i40evf_adapter *adapter, int j)
-{
-       j += 1;
-
-       return j >= adapter->num_active_queues ? 0 : j;
-}
-
-/**
- * i40evf_configure_rss - Prepare for RSS if used
+ * i40evf_configure_rss - Prepare for RSS
  * @adapter: board private structure
  **/
 static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 {
        u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
        struct i40e_hw *hw = &adapter->hw;
+       u32 cqueue = 0;
        u32 lut = 0;
        int i, j;
        u64 hena;
 
-       /* No RSS for single queue. */
-       if (adapter->num_active_queues == 1) {
-               wr32(hw, I40E_VFQF_HENA(0), 0);
-               wr32(hw, I40E_VFQF_HENA(1), 0);
-               return;
-       }
-
        /* Hash type is configured by the PF - we just supply the key */
        netdev_rss_key_fill(rss_key, sizeof(rss_key));
+
+       /* Fill out hash function seed */
        for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
                wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
 
@@ -1459,16 +1460,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
        wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
        /* Populate the LUT with max no. of queues in round robin fashion */
-       j = adapter->num_active_queues;
        for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-               j = next_queue(adapter, j);
-               lut = j;
-               j = next_queue(adapter, j);
-               lut |= j << 8;
-               j = next_queue(adapter, j);
-               lut |= j << 16;
-               j = next_queue(adapter, j);
-               lut |= j << 24;
+               lut = 0;
+               for (j = 0; j < 4; j++) {
+                       if (cqueue == adapter->vsi_res->num_queue_pairs)
+                               cqueue = 0;
+                       lut |= ((cqueue) << (8 * j));
+                       cqueue++;
+               }
                wr32(hw, I40E_VFQF_HLUT(i), lut);
        }
        i40e_flush(hw);
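
The loop above packs four 8-bit queue indices into each 32-bit LUT register, cycling through the VF's queue pairs round robin. A stand-alone hedged sketch of the same packing (names illustrative):

/* Fill num_regs 32-bit LUT registers with queues 0..num_queues-1,
 * four entries per register, exactly as the loop above does.
 */
static void fill_rss_lut(u32 *lut_regs, int num_regs, int num_queues)
{
	u32 cqueue = 0;
	int i, j;

	for (i = 0; i < num_regs; i++) {
		u32 lut = 0;

		for (j = 0; j < 4; j++) {
			if (cqueue == num_queues)
				cqueue = 0;
			lut |= cqueue++ << (8 * j);
		}
		lut_regs[i] = lut;
	}
}
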
index 7068e9c3691dd84af5faf6712eafae23b81b901b..ae5b8b22e7e00ca4dbf61b0784a484db27554049 100644 (file)
@@ -642,7 +642,6 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         (u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 9)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 10)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB            (u32)(1 << 11)
 
        /* Tx fast path data */
        int num_tx_queues;
@@ -722,6 +721,8 @@ struct ixgbe_adapter {
        u8 __iomem *io_addr; /* Mainly for iounmap use */
        u32 wol;
 
+       u16 bridge_mode;
+
        u16 eeprom_verh;
        u16 eeprom_verl;
        u16 eeprom_cap;
index 2ad91cb04dab9fc7f0cfbd9c328e4b269d6ac0f4..631c603fc96649c95995b286577ca7b489d3f58d 100644 (file)
@@ -71,6 +71,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_adapter *adapter;
        struct ixgbe_fcoe_ddp *ddp;
+       struct ixgbe_hw *hw;
        u32 fcbuff;
 
        if (!netdev)
@@ -85,25 +86,51 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
        if (!ddp->udl)
                return 0;
 
+       hw = &adapter->hw;
        len = ddp->len;
-       /* if there an error, force to invalidate ddp context */
-       if (ddp->err) {
+       /* if there is no error, skip ddp context invalidation */
+       if (!ddp->err)
+               goto skip_ddpinv;
+
+       if (hw->mac.type == ixgbe_mac_X550) {
+               /* X550 does not require DDP FCoE lock */
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
+                               (xid | IXGBE_FCFLTRW_WE));
+
+               /* program FCBUFF */
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);
+
+               /* program FCDMARW */
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
+                               (xid | IXGBE_FCDMARW_WE));
+
+               /* read FCBUFF to check context invalidated */
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
+                               (xid | IXGBE_FCDMARW_RE));
+               fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
+       } else {
+               /* other hardware requires DDP FCoE lock */
                spin_lock_bh(&fcoe->lock);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
+               IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
                                (xid | IXGBE_FCFLTRW_WE));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+               IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
                                (xid | IXGBE_FCDMARW_WE));
 
                /* guaranteed to be invalidated after 100us */
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+               IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
                                (xid | IXGBE_FCDMARW_RE));
-               fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
+               fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
                spin_unlock_bh(&fcoe->lock);
-               if (fcbuff & IXGBE_FCBUFF_VALID)
-                       udelay(100);
-       }
+       }
+
+       if (fcbuff & IXGBE_FCBUFF_VALID)
+               usleep_range(100, 150);
+
+skip_ddpinv:
        if (ddp->sgl)
                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
                             DMA_FROM_DEVICE);
@@ -272,7 +299,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 
        /* program DMA context */
        hw = &adapter->hw;
-       spin_lock_bh(&fcoe->lock);
 
        /* turn on last frame indication for target mode as the FCP_RSP target is
         * supposed to send FCP_RSP when it is done. */
@@ -283,16 +309,33 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
                IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
-       IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
-       IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
-       IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
-       /* program filter context */
-       IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
-       IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
-       IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+       if (hw->mac.type == ixgbe_mac_X550) {
+               /* X550 does not require DDP lock */
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
+                               ddp->udp & DMA_BIT_MASK(32));
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
+               /* program filter context */
+               IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
+       } else {
+               /* DDP lock for indirect DDP context access */
+               spin_lock_bh(&fcoe->lock);
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
+               IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
+               IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
+               IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
+               /* program filter context */
+               IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
+               IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
 
-       spin_unlock_bh(&fcoe->lock);
+               spin_unlock_bh(&fcoe->lock);
+       }
 
        return 1;
 
@@ -371,6 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
        struct fcoe_crc_eof *crc;
        __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
        __le32 ddp_err;
+       int ddp_max;
        u32 fctl;
        u16 xid;
 
@@ -392,7 +436,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
        else
                xid =  be16_to_cpu(fh->fh_rx_id);
 
-       if (xid >= IXGBE_FCOE_DDP_MAX)
+       ddp_max = IXGBE_FCOE_DDP_MAX;
+       /* X550 has different DDP Max limit */
+       if (adapter->hw.mac.type == ixgbe_mac_X550)
+               ddp_max = IXGBE_FCOE_DDP_MAX_X550;
+       if (xid >= ddp_max)
                return -EINVAL;
 
        fcoe = &adapter->fcoe;
@@ -612,7 +660,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
        struct ixgbe_hw *hw = &adapter->hw;
-       int i, fcoe_q, fcoe_i;
+       int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
+       int fcreta_size;
        u32 etqf;
 
        /* Minimal functionality for FCoE requires at least CRC offloads */
@@ -633,10 +682,23 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                return;
 
        /* Use one or more Rx queues for FCoE by redirection table */
-       for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+       fcreta_size = IXGBE_FCRETA_SIZE;
+       if (adapter->hw.mac.type == ixgbe_mac_X550)
+               fcreta_size = IXGBE_FCRETA_SIZE_X550;
+
+       for (i = 0; i < fcreta_size; i++) {
+               if (adapter->hw.mac.type == ixgbe_mac_X550) {
+                       int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
+                                                       fcoe->indices);
+                       fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
+                       fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
+                                  IXGBE_FCRETA_ENTRY_HIGH_MASK;
+               }
+
                fcoe_i = fcoe->offset + (i % fcoe->indices);
                fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+               fcoe_q |= fcoe_q_h;
                IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
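
On X550 each FCRETA entry carries two queue indices: the low queue in bits 6:0 and a second queue in bits 22:16 (see the IXGBE_FCRETA_ENTRY_* definitions added further down). A hedged sketch of the packing in isolation (helper name and values illustrative):

/* Build one X550 FCRETA entry from a low and a high queue index. */
static u32 fcreta_entry_x550(u32 fcoe_q_low, u32 fcoe_q_high)
{
	u32 entry = fcoe_q_low & IXGBE_FCRETA_ENTRY_MASK;	/* bits 6:0 */

	entry |= (fcoe_q_high << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
		 IXGBE_FCRETA_ENTRY_HIGH_MASK;			/* bits 22:16 */
	return entry;
}
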
@@ -672,13 +734,18 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-       int cpu, i;
+       int cpu, i, ddp_max;
 
        /* do nothing if no DDP pools were allocated */
        if (!fcoe->ddp_pool)
                return;
 
-       for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+       ddp_max = IXGBE_FCOE_DDP_MAX;
+       /* X550 has different DDP Max limit */
+       if (adapter->hw.mac.type == ixgbe_mac_X550)
+               ddp_max = IXGBE_FCOE_DDP_MAX_X550;
+
+       for (i = 0; i < ddp_max; i++)
                ixgbe_fcoe_ddp_put(adapter->netdev, i);
 
        for_each_possible_cpu(cpu)
@@ -758,6 +825,9 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
        }
 
        adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+       /* X550 has different DDP Max limit */
+       if (adapter->hw.mac.type == ixgbe_mac_X550)
+               adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;
 
        return 0;
 }
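
Several of the call sites above repeat the same X550-versus-legacy choice of DDP context count. A tiny hypothetical helper (not part of the patch) makes the rule explicit:

/* Per-MAC DDP context limit selected by the open-coded checks above. */
static inline int ixgbe_fcoe_ddp_max(struct ixgbe_hw *hw)
{
	if (hw->mac.type == ixgbe_mac_X550)
		return IXGBE_FCOE_DDP_MAX_X550;		/* 2048, 11-bit xid */
	return IXGBE_FCOE_DDP_MAX;			/* 512, 9-bit xid */
}
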
index 0772b7730fce92de4e2ff54d44f6528397c2b3a9..38385876effb0c9a3d8659afebc5f469575f55f4 100644 (file)
@@ -46,6 +46,7 @@
 #define IXGBE_FCBUFF_MAX       65536   /* 64KB max */
 #define IXGBE_FCBUFF_MIN       4096    /* 4KB min */
 #define IXGBE_FCOE_DDP_MAX     512     /* 9 bits xid */
+#define IXGBE_FCOE_DDP_MAX_X550        2048    /* 11 bits xid */
 
 /* Default traffic class to use for FCoE */
 #define IXGBE_FCOE_DEFTC       3
@@ -77,7 +78,7 @@ struct ixgbe_fcoe {
        struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
        atomic_t refcnt;
        spinlock_t lock;
-       struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+       struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550];
        void *extra_ddp_buffer;
        dma_addr_t extra_ddp_buffer_dma;
        unsigned long mode;
index 395dc6bb5d82139b6e4a502517f171256d06eac9..a7acb2dee0099c69a9beca085990c478e21cd6a4 100644 (file)
@@ -3553,7 +3553,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
-       if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+       if (adapter->bridge_mode == BRIDGE_MODE_VEB)
                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
        /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
@@ -7870,6 +7870,80 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 }
 
+/**
+ * ixgbe_configure_bridge_mode - set various bridge modes
+ * @adapter: the private structure
+ * @mode: requested bridge mode
+ *
+ * Configure some settings required for various bridge modes.
+ **/
+static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
+                                      __u16 mode)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       unsigned int p, num_pools;
+       u32 vmdctl;
+
+       switch (mode) {
+       case BRIDGE_MODE_VEPA:
+               /* disable Tx loopback, rely on switch hairpin mode */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
+
+               /* must enable Rx switching replication to allow multicast
+                * packet reception on all VFs, and to enable source address
+                * pruning.
+                */
+               vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+               vmdctl |= IXGBE_VT_CTL_REPLEN;
+               IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
+
+               /* enable Rx source address pruning. Note, this requires
+                * replication to be enabled or else it does nothing.
+                */
+               num_pools = adapter->num_vfs + adapter->num_rx_pools;
+               for (p = 0; p < num_pools; p++) {
+                       if (hw->mac.ops.set_source_address_pruning)
+                               hw->mac.ops.set_source_address_pruning(hw,
+                                                                      true,
+                                                                      p);
+               }
+               break;
+       case BRIDGE_MODE_VEB:
+               /* enable Tx loopback for internal VF/PF communication */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
+                               IXGBE_PFDTXGSWC_VT_LBEN);
+
+               /* disable Rx switching replication unless we have SR-IOV
+                * virtual functions
+                */
+               vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+               if (!adapter->num_vfs)
+                       vmdctl &= ~IXGBE_VT_CTL_REPLEN;
+               IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
+
+               /* disable Rx source address pruning, since we don't expect to
+                * be receiving external loopback of our transmitted frames.
+                */
+               num_pools = adapter->num_vfs + adapter->num_rx_pools;
+               for (p = 0; p < num_pools; p++) {
+                       if (hw->mac.ops.set_source_address_pruning)
+                               hw->mac.ops.set_source_address_pruning(hw,
+                                                                      false,
+                                                                      p);
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       adapter->bridge_mode = mode;
+
+       e_info(drv, "enabling bridge mode: %s\n",
+              mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+       return 0;
+}
+
 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                                    struct nlmsghdr *nlh, u16 flags)
 {
@@ -7885,8 +7959,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
+               u32 status;
                __u16 mode;
-               u32 reg = 0;
 
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
@@ -7895,19 +7969,11 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                        return -EINVAL;
 
                mode = nla_get_u16(attr);
-               if (mode == BRIDGE_MODE_VEPA) {
-                       reg = 0;
-                       adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
-               } else if (mode == BRIDGE_MODE_VEB) {
-                       reg = IXGBE_PFDTXGSWC_VT_LBEN;
-                       adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
-               } else
-                       return -EINVAL;
-
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+               status = ixgbe_configure_bridge_mode(adapter, mode);
+               if (status)
+                       return status;
 
-               e_info(drv, "enabling bridge mode: %s\n",
-                       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+               break;
        }
 
        return 0;
@@ -7918,17 +7984,12 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                    u32 filter_mask)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       u16 mode;
 
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return 0;
 
-       if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
-               mode = BRIDGE_MODE_VEB;
-       else
-               mode = BRIDGE_MODE_VEPA;
-
-       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+                                      adapter->bridge_mode, 0, 0);
 }
 
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -8394,7 +8455,6 @@ skip_sriov:
                           NETIF_F_IPV6_CSUM |
                           NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX |
-                          NETIF_F_HW_VLAN_CTAG_FILTER |
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
                           NETIF_F_RXHASH |
@@ -8416,6 +8476,7 @@ skip_sriov:
        }
 
        netdev->hw_features |= NETIF_F_RXALL;
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
@@ -8977,8 +9038,6 @@ static void __exit ixgbe_exit_module(void)
        pci_unregister_driver(&ixgbe_driver);
 
        ixgbe_dbg_exit();
-
-       rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
 #ifdef CONFIG_IXGBE_DCA
index 09a291bb7c343c6b202e8bff6f7594329ec2a564..2d98ecdbd3d67cede0261ccf1309841a2b0cc293 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/ipv6.h>
+#include <linux/if_bridge.h>
 #ifdef NETIF_F_HW_VLAN_CTAG_TX
 #include <linux/if_vlan.h>
 #endif
@@ -79,7 +80,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 
        /* Initialize default switching mode VEB */
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
-       adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+       adapter->bridge_mode = BRIDGE_MODE_VEB;
 
        /* If call to enable VFs succeeded then allocate memory
         * for per VF control structures.
index c3ddc944f1e95722b9db06a14d0dcb74df704484..dd6ba5916dfe002b528684db30eb23c847829c2c 100644 (file)
@@ -285,6 +285,8 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_VLVF(_i)  (0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
 #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
 #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_PFFLPL   0x050B0
+#define IXGBE_PFFLPH   0x050B4
 #define IXGBE_VT_CTL         0x051B0
 #define IXGBE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i))) /* 64 total */
 #define IXGBE_PFMBMEM(_i)    (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
@@ -608,6 +610,8 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTBCNRM    0x04980
 #define IXGBE_RTTQCNRM    0x04980
 
+/* FCoE Direct DMA Context */
+#define IXGBE_FCDDC(_i, _j)    (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
 #define IXGBE_FCPTRH    0x02414 /* FC USer Desc. PTR High */
@@ -634,6 +638,9 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_TSOFF     0x04A98 /* Tx FC SOF */
 #define IXGBE_REOFF     0x05158 /* Rx FC EOF */
 #define IXGBE_RSOFF     0x051F8 /* Rx FC SOF */
+/* FCoE Direct Filter Context */
+#define IXGBE_FCDFC(_i, _j)    (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCDFCD(_i)       (0x30000 + ((_i) * 0x4))
 /* FCoE Filter Context Registers */
 #define IXGBE_FCFLT     0x05108 /* FC FLT Context */
 #define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
@@ -664,6 +671,10 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_FCRECTL_ENA       0x1        /* FCoE Redir Table Enable */
 #define IXGBE_FCRETA_SIZE       8          /* Max entries in FCRETA */
 #define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
+/* Higher 7 bits for the queue index */
+#define IXGBE_FCRETA_ENTRY_HIGH_MASK   0x007F0000
+#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT  16
 
 /* Stats registers */
 #define IXGBE_CRCERRS   0x04000
@@ -3069,6 +3080,8 @@ struct ixgbe_mac_operations {
        s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
        void (*disable_rx)(struct ixgbe_hw *hw);
        void (*enable_rx)(struct ixgbe_hw *hw);
+       void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+                                          unsigned int);
        void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
        /* DMA Coalescing */
index 58a3155af7cd0215a8bf552973218859b59f1ed8..cf5cf819a6b890bdce1da9d8eecc4acbe0d479bb 100644 (file)
@@ -1363,6 +1363,33 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 
+/** ixgbe_set_source_address_pruning_X550 - Enable/Disable src address pruning
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable source address pruning
+ *  @pool: Rx pool to set source address pruning for
+ **/
+static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
+                                                 bool enable,
+                                                 unsigned int pool)
+{
+       u64 pfflp;
+
+       /* max rx pool is 63 */
+       if (pool > 63)
+               return;
+
+       pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+       pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+       if (enable)
+               pfflp |= (1ULL << pool);
+       else
+               pfflp &= ~(1ULL << pool);
+
+       IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+       IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
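
The register pair written above behaves as one 64-bit per-pool bitmap: IXGBE_PFFLPL covers pools 0-31 and IXGBE_PFFLPH pools 32-63 (this split is inferred from the shift/mask handling in the function). A short worked example of where a given pool lands:

/* Worked example with an illustrative pool number */
unsigned int pool = 40;
bool in_high_reg = pool >= 32;		/* true: the bit lives in IXGBE_PFFLPH */
unsigned int bit = pool & 31;		/* bit 8 of that register */
u64 mask = 1ULL << pool;		/* bit 40 of the combined 64-bit value */
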
+
 #define X550_COMMON_MAC \
        .init_hw                        = &ixgbe_init_hw_generic, \
        .start_hw                       = &ixgbe_start_hw_X540, \
@@ -1397,6 +1424,8 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
        .init_uta_tables                = &ixgbe_init_uta_tables_generic, \
        .set_mac_anti_spoofing          = &ixgbe_set_mac_anti_spoofing, \
        .set_vlan_anti_spoofing         = &ixgbe_set_vlan_anti_spoofing, \
+       .set_source_address_pruning     = \
+                               &ixgbe_set_source_address_pruning_X550, \
        .set_ethertype_anti_spoofing    = \
                                &ixgbe_set_ethertype_anti_spoofing_X550, \
        .acquire_swfw_sync              = &ixgbe_acquire_swfw_sync_X540, \
index 96208f17bb53be6240ae9e8d0cd30cc09041163e..ce5f7f9cff060868db1ad0ba3bde378d65222793 100644 (file)
 #define MVNETA_TXQ_CMD                           0x2448
 #define      MVNETA_TXQ_DISABLE_SHIFT            8
 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
+#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
+#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 #define MVNETA_ACC_MODE                          0x2500
 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 #define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 #define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 #define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
+#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
 
 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
 #define MVNETA_INTR_OLD_MASK                     0x25ac
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
+#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 #define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
+#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
+#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 #define MVNETA_MIB_COUNTERS_BASE                 0x3080
@@ -304,6 +310,7 @@ struct mvneta_port {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+       int use_inband_status:1;
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -994,6 +1001,20 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
 
+       if (pp->use_inband_status) {
+               val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+               val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
+                        MVNETA_GMAC_FORCE_LINK_DOWN |
+                        MVNETA_GMAC_AN_FLOW_CTRL_EN);
+               val |= MVNETA_GMAC_INBAND_AN_ENABLE |
+                      MVNETA_GMAC_AN_SPEED_EN |
+                      MVNETA_GMAC_AN_DUPLEX_EN;
+               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+               val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+               val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+               mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+       }
+
        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);
@@ -2043,6 +2064,28 @@ static irqreturn_t mvneta_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int mvneta_fixed_link_update(struct mvneta_port *pp,
+                                   struct phy_device *phy)
+{
+       struct fixed_phy_status status;
+       struct fixed_phy_status changed = {};
+       u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
+
+       status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+       if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+               status.speed = SPEED_1000;
+       else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+               status.speed = SPEED_100;
+       else
+               status.speed = SPEED_10;
+       status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+       changed.link = 1;
+       changed.speed = 1;
+       changed.duplex = 1;
+       fixed_phy_update_state(phy, &status, &changed);
+       return 0;
+}
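
mvneta_fixed_link_update() above mirrors the GMAC status bits into a fixed_phy_status and hands it to fixed_phy_update_state(), which applies only the fields flagged in the 'changed' argument. A minimal hedged usage sketch of that call (values illustrative; 'phy' is the fixed PHY, e.g. as found via of_phy_find_device() later in this patch):

struct fixed_phy_status status = {
	.link   = 1,
	.speed  = SPEED_1000,
	.duplex = DUPLEX_FULL,
};
struct fixed_phy_status changed = {
	.link = 1, .speed = 1, .duplex = 1,	/* pause bits left untouched */
};

fixed_phy_update_state(phy, &status, &changed);
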
+
 /* NAPI handler
  * Bits 0 - 7 of the causeRxTx register indicate that packets were
  * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
@@ -2063,8 +2106,18 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        }
 
        /* Read cause register */
-       cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
-               (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+       cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
+       if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
+               u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
+
+               mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+               if (pp->use_inband_status && (cause_misc &
+                               (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                                MVNETA_CAUSE_LINK_CHANGE |
+                                MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
+                       mvneta_fixed_link_update(pp, pp->phy_dev);
+               }
+       }
 
        /* Release Tx descriptors */
        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
@@ -2109,7 +2162,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                local_irq_save(flags);
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                           MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+                           MVNETA_RX_INTR_MASK(rxq_number) |
+                           MVNETA_TX_INTR_MASK(txq_number) |
+                           MVNETA_MISCINTR_INTR_MASK);
                local_irq_restore(flags);
        }
 
@@ -2373,7 +2428,13 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
        /* Unmask interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                   MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+                   MVNETA_RX_INTR_MASK(rxq_number) |
+                   MVNETA_TX_INTR_MASK(txq_number) |
+                   MVNETA_MISCINTR_INTR_MASK);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
 
        phy_start(pp->phy_dev);
        netif_tx_start_all_queues(pp->dev);
@@ -2523,9 +2584,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
-                                MVNETA_GMAC_CONFIG_FULL_DUPLEX |
-                                MVNETA_GMAC_AN_SPEED_EN |
-                                MVNETA_GMAC_AN_DUPLEX_EN);
+                                MVNETA_GMAC_CONFIG_FULL_DUPLEX);
 
                        if (phydev->duplex)
                                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2554,12 +2613,24 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
        if (status_change) {
                if (phydev->link) {
-                       u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-                       val |= (MVNETA_GMAC_FORCE_LINK_PASS |
-                               MVNETA_GMAC_FORCE_LINK_DOWN);
-                       mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+                       if (!pp->use_inband_status) {
+                               u32 val = mvreg_read(pp,
+                                                 MVNETA_GMAC_AUTONEG_CONFIG);
+                               val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+                               val |= MVNETA_GMAC_FORCE_LINK_PASS;
+                               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                                           val);
+                       }
                        mvneta_port_up(pp);
                } else {
+                       if (!pp->use_inband_status) {
+                               u32 val = mvreg_read(pp,
+                                                 MVNETA_GMAC_AUTONEG_CONFIG);
+                               val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+                               val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+                               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                                           val);
+                       }
                        mvneta_port_down(pp);
                }
                phy_print_status(phydev);
@@ -2658,16 +2729,11 @@ static int mvneta_stop(struct net_device *dev)
 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       int ret;
 
        if (!pp->phy_dev)
                return -ENOTSUPP;
 
-       ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
-       if (!ret)
-               mvneta_adjust_link(dev);
-
-       return ret;
+       return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
 }
 
 /* Ethtool methods */
@@ -2910,6 +2976,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
                return -EINVAL;
        }
 
+       if (pp->use_inband_status)
+               ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+
        /* Cancel Port Reset */
        ctrl &= ~MVNETA_GMAC2_PORT_RESET;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
@@ -2934,6 +3003,7 @@ static int mvneta_probe(struct platform_device *pdev)
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
        int phy_mode;
+       int fixed_phy = 0;
        int err;
 
        /* Our multiqueue support is not complete, so for now, only
@@ -2967,6 +3037,7 @@ static int mvneta_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot register fixed PHY\n");
                        goto err_free_irq;
                }
+               fixed_phy = 1;
 
                /* In the case of a fixed PHY, the DT node associated
                 * to the PHY is the Ethernet MAC DT node.
@@ -2990,6 +3061,8 @@ static int mvneta_probe(struct platform_device *pdev)
        pp = netdev_priv(dev);
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
+       pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
+                               fixed_phy;
 
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
@@ -3067,6 +3140,12 @@ static int mvneta_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, pp->dev);
 
+       if (pp->use_inband_status) {
+               struct phy_device *phy = of_phy_find_device(dn);
+
+               mvneta_fixed_link_update(pp, phy);
+       }
+
        return 0;
 
 err_free_stats:
index 3e9c70f15b4258f56ac61fd8999d7d8edfe101c2..c82217e0d22d557d6ae3da0907e3b2cd5a08cf36 100644 (file)
@@ -1,7 +1,8 @@
 obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
 
-mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
+               main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
+               srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
index 20b3c7b21e632bb05dc38f6ba617c5abbac36c98..f0fbb4ade85db9b3db5ce049eff76cd978d08353 100644 (file)
@@ -48,6 +48,7 @@
 
 #include "mlx4.h"
 #include "fw.h"
+#include "fw_qos.h"
 
 #define CMD_POLL_TOKEN 0xffff
 #define INBOX_MASK     0xffffffffffffff00ULL
@@ -724,8 +725,10 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                 * on the host, we demote the error message for this
                 * specific command/input_mod/opcode_mod/fw-status to debug level.
                 */
-               if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
-                   op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
+               if (op == MLX4_CMD_SET_PORT &&
+                   (in_modifier == 1 || in_modifier == 2) &&
+                   op_modifier == MLX4_SET_PORT_IB_OPCODE &&
+                   context->fw_status == CMD_STAT_BAD_SIZE)
                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
                else
@@ -1454,6 +1457,24 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper,
        },
+       {
+               .opcode = MLX4_CMD_ALLOCATE_VPP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
+       },
+       {
+               .opcode = MLX4_CMD_SET_VPORT_QOS,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
+       },
        {
                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
                .has_inbox = false,
@@ -1790,7 +1811,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 
        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
            vp_oper->state.default_qos == vp_admin->default_qos &&
-           vp_oper->state.link_state == vp_admin->link_state)
+           vp_oper->state.link_state == vp_admin->link_state &&
+           vp_oper->state.qos_vport == vp_admin->qos_vport)
                return 0;
 
        if (!(priv->mfunc.master.slave_state[slave].active &&
@@ -1848,6 +1870,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
        vp_oper->state.default_vlan = vp_admin->default_vlan;
        vp_oper->state.default_qos = vp_admin->default_qos;
        vp_oper->state.link_state = vp_admin->link_state;
+       vp_oper->state.qos_vport = vp_admin->qos_vport;
 
        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
@@ -1856,6 +1879,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
        work->port = port;
        work->slave = slave;
        work->qos = vp_oper->state.default_qos;
+       work->qos_vport = vp_oper->state.qos_vport;
        work->vlan_id = vp_oper->state.default_vlan;
        work->vlan_ix = vp_oper->vlan_idx;
        work->priv = priv;
@@ -1865,6 +1889,63 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
        return 0;
 }
 
+static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
+{
+       struct mlx4_qos_manager *port_qos_ctl;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
+       bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
+
+       /* Enable only default prio at PF init routine */
+       set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
+}
+
+static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
+{
+       int i;
+       int err;
+       int num_vfs;
+       u16 availible_vpp;
+       u8 vpp_param[MLX4_NUM_UP];
+       struct mlx4_qos_manager *port_qos;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
+       if (err) {
+               mlx4_info(dev, "Failed to query available VPPs\n");
+               return;
+       }
+
+       port_qos = &priv->mfunc.master.qos_ctl[port];
+       num_vfs = (availible_vpp /
+                  bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               if (test_bit(i, port_qos->priority_bm))
+                       vpp_param[i] = num_vfs;
+       }
+
+       err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
+       if (err) {
+               mlx4_info(dev, "Failed allocating VPPs\n");
+               return;
+       }
+
+       /* Query actual allocated VPP, just to make sure */
+       err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
+       if (err) {
+               mlx4_info(dev, "Failed to query available VPPs\n");
+               return;
+       }
+
+       port_qos->num_of_qos_vfs = num_vfs;
+       mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, availible_vpp);
+
+       for (i = 0; i < MLX4_NUM_UP; i++)
+               mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
+                        vpp_param[i]);
+}
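
The allocation above divides the port's available VPPs evenly over the enabled priorities (num_vfs = available VPPs / bitmap_weight(priority_bm)) and grants that share to every enabled UP. A worked example under the default set up by mlx4_set_default_port_qos(), where only MLX4_DEFAULT_QOS_PRIO is enabled (numbers illustrative):

/* Illustrative numbers only */
u16 avail_vpp = 64;			/* reported by mlx4_ALLOCATE_VPP_get() */
int enabled_prios = 1;			/* bitmap_weight() with one priority set */
int num_vfs = avail_vpp / enabled_prios;	/* 64 VFs may get a QoS vport */
/* vpp_param[MLX4_DEFAULT_QOS_PRIO] = num_vfs; all other entries remain 0 */
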
 
 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 {
@@ -2002,7 +2083,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                        goto reset_slave;
                slave_state[slave].vhcr_dma = ((u64) param) << 48;
                priv->mfunc.master.slave_state[slave].cookie = 0;
-               mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
                break;
        case MLX4_COMM_CMD_VHCR1:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2213,6 +2293,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
        }
 
        if (mlx4_is_master(dev)) {
+               struct mlx4_vf_oper_state *vf_oper;
+               struct mlx4_vf_admin_state *vf_admin;
+
                priv->mfunc.master.slave_state =
                        kzalloc(dev->num_slaves *
                                sizeof(struct mlx4_slave_state), GFP_KERNEL);
@@ -2232,8 +2315,11 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                        goto err_comm_oper;
 
                for (i = 0; i < dev->num_slaves; ++i) {
+                       vf_admin = &priv->mfunc.master.vf_admin[i];
+                       vf_oper = &priv->mfunc.master.vf_oper[i];
                        s_state = &priv->mfunc.master.slave_state[i];
                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
+                       mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
                        for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
                                s_state->event_eq[j].eqn = -1;
                        __raw_writel((__force u32) 0,
@@ -2242,6 +2328,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                                     &priv->mfunc.comm[i].slave_read);
                        mmiowb();
                        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+                               struct mlx4_vport_state *admin_vport;
+                               struct mlx4_vport_state *oper_vport;
+
                                s_state->vlan_filter[port] =
                                        kzalloc(sizeof(struct mlx4_vlan_fltr),
                                                GFP_KERNEL);
@@ -2250,15 +2339,30 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                                                kfree(s_state->vlan_filter[port]);
                                        goto err_slaves;
                                }
+
+                               admin_vport = &vf_admin->vport[port];
+                               oper_vport = &vf_oper->vport[port].state;
                                INIT_LIST_HEAD(&s_state->mcast_filters[port]);
-                               priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
-                               priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
-                               priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
-                               priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
+                               admin_vport->default_vlan = MLX4_VGT;
+                               oper_vport->default_vlan = MLX4_VGT;
+                               admin_vport->qos_vport =
+                                               MLX4_VPP_DEFAULT_VPORT;
+                               oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+                               vf_oper->vport[port].vlan_idx = NO_INDX;
+                               vf_oper->vport[port].mac_idx = NO_INDX;
                        }
                        spin_lock_init(&s_state->lock);
                }
 
+               if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
+                       for (port = 1; port <= dev->caps.num_ports; port++) {
+                               if (mlx4_is_eth(dev, port)) {
+                                       mlx4_set_default_port_qos(dev, port);
+                                       mlx4_allocate_port_vpps(dev, port);
+                               }
+                       }
+               }
+
                memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
                INIT_WORK(&priv->mfunc.master.comm_work,
@@ -2679,6 +2783,103 @@ static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
        return port;
 }
 
+static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
+                             int max_tx_rate)
+{
+       int i;
+       int err;
+       struct mlx4_qos_manager *port_qos;
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
+
+       port_qos = &priv->mfunc.master.qos_ctl[port];
+       memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
+
+       if (slave > port_qos->num_of_qos_vfs) {
+               mlx4_info(dev, "No available VPP resources for this VF\n");
+               return -EINVAL;
+       }
+
+       /* Default QoS values need to be queried from Vport 0 first */
+       err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
+       if (err) {
+               mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
+               return err;
+       }
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
+                       vpp_qos[i].max_avg_bw = max_tx_rate;
+                       vpp_qos[i].enable = 1;
+               } else {
+                       /* if the user supplied tx_rate == 0, no rate limit
+                        * configuration is required, so we leave max_avg_bw
+                        * at the value queried from Vport 0.
+                        */
+                       vpp_qos[i].enable = 0;
+               }
+       }
+
+       err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
+       if (err) {
+               mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
+               return err;
+       }
+
+       return 0;
+}
+
+static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
+                                       struct mlx4_vport_state *vf_admin)
+{
+       struct mlx4_qos_manager *info;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (!mlx4_is_master(dev) ||
+           !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
+               return false;
+
+       info = &priv->mfunc.master.qos_ctl[port];
+
+       if (vf_admin->default_vlan != MLX4_VGT &&
+           test_bit(vf_admin->default_qos, info->priority_bm))
+               return true;
+
+       return false;
+}
+
+static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
+                                      struct mlx4_vport_state *vf_admin,
+                                      int vlan, int qos)
+{
+       struct mlx4_vport_state dummy_admin = {0};
+
+       if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
+           !vf_admin->tx_rate)
+               return true;
+
+       dummy_admin.default_qos = qos;
+       dummy_admin.default_vlan = vlan;
+
+       /* The VF wants to move to another VST state which is valid with the
+        * current rate limit: either a different default vlan in VST or
+        * another supported QoS priority. Otherwise we don't allow this
+        * change while the TX rate is still configured.
+        */
+       if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
+               return true;
+
+       mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
+                 (vlan == MLX4_VGT) ? "VGT" : "VST");
+
+       if (vlan != MLX4_VGT)
+               mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
+
+       mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
+
+       return false;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2722,12 +2923,22 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
        port = mlx4_slaves_closest_port(dev, slave, port);
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
+       if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
+               return -EPERM;
+
        if ((0 == vlan) && (0 == qos))
                vf_admin->default_vlan = MLX4_VGT;
        else
                vf_admin->default_vlan = vlan;
        vf_admin->default_qos = qos;
 
+       /* If a rate was configured prior to VST, we saved the configured rate
+        * in vf_admin->tx_rate and now, if the priority is supported, we
+        * enforce the QoS.
+        */
+       if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
+           vf_admin->tx_rate)
+               vf_admin->qos_vport = slave;
+
        if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
                mlx4_info(dev,
                          "updating vf %d port %d config will take effect on next VF restart\n",
@@ -2736,6 +2947,69 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 }
 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
 
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+                    int max_tx_rate)
+{
+       int err;
+       int slave;
+       struct mlx4_vport_state *vf_admin;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (!mlx4_is_master(dev) ||
+           !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
+               return -EPROTONOSUPPORT;
+
+       if (min_tx_rate) {
+               mlx4_info(dev, "Minimum BW share not supported\n");
+               return -EPROTONOSUPPORT;
+       }
+
+       slave = mlx4_get_slave_indx(dev, vf);
+       if (slave < 0)
+               return -EINVAL;
+
+       port = mlx4_slaves_closest_port(dev, slave, port);
+       vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+       err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
+       if (err) {
+               mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
+                         max_tx_rate);
+               return err;
+       }
+
+       vf_admin->tx_rate = max_tx_rate;
+       /* if VF is not in supported mode (VST with supported prio),
+        * we do not change vport configuration for its QPs, but save
+        * the rate, so it will be enforced when it moves to supported
+        * mode next time.
+        */
+       if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
+               mlx4_info(dev,
+                         "rate set for VF %d when not in valid state\n", vf);
+
+               if (vf_admin->default_vlan != MLX4_VGT)
+                       mlx4_info(dev, "VST priority not supported by QoS\n");
+               else
+                       mlx4_info(dev, "VF in VGT mode (VST needed)\n");
+
+               mlx4_info(dev,
+                         "rate %d takes effect when VF moves to valid state\n",
+                         max_tx_rate);
+               return 0;
+       }
+
+       /* If the user sets rate 0, assign the default vport to its QPs */
+       vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
+
+       if (priv->mfunc.master.slave_state[slave].active &&
+           dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
+               mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
+
  /* mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)
  * if VST, will return vlan & qos (if not NULL)
@@ -2809,7 +3083,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
 
        ivf->vlan               = s_info->default_vlan;
        ivf->qos                = s_info->default_qos;
-       ivf->max_tx_rate        = s_info->tx_rate;
+
+       if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
+               ivf->max_tx_rate = s_info->tx_rate;
+       else
+               ivf->max_tx_rate = 0;
+
        ivf->min_tx_rate        = 0;
        ivf->spoofchk           = s_info->spoofchk;
        ivf->linkstate          = s_info->link_state;
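
For illustration only (not part of this patch), here is a minimal sketch of how a caller holding a struct mlx4_dev could drive the exported mlx4_set_vf_rate() helper above; the en driver wires it into .ndo_set_vf_rate further below, and the example function name here is hypothetical:

	/* Cap VF 0 on port 1 to a 1000 Mb/s maximum. min_tx_rate must be 0,
	 * since minimum BW shares are rejected with -EPROTONOSUPPORT.
	 */
	static int example_cap_vf0_rate(struct mlx4_dev *dev)
	{
		int err;

		err = mlx4_set_vf_rate(dev, 1 /* port */, 0 /* vf */,
				       0 /* min_tx_rate */, 1000 /* max_tx_rate */);
		if (err)
			pr_warn("example: failed to set VF rate: %d\n", err);
		return err;
	}
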
index 8e3260c0eaa5802c5967b22f24970025c037cd6a..f01918c63f2816fecf07141a29c8c140b29e8629 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/math64.h>
 
 #include "mlx4_en.h"
+#include "fw_qos.h"
 
 /* Definitions for QCN
  */
index eba969b08dd1f6685433d3252ded00a45877af32..3f44e2bbb9824caad9068e7ce6f03e1a2df382f2 100644 (file)
@@ -1939,6 +1939,32 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
        return 0;
 }
 
+static int mlx4_en_set_phys_id(struct net_device *dev,
+                              enum ethtool_phys_id_state state)
+{
+       int err;
+       u16 beacon_duration;
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
+               return -EOPNOTSUPP;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               beacon_duration = PORT_BEACON_MAX_LIMIT;
+               break;
+       case ETHTOOL_ID_INACTIVE:
+               beacon_duration = 0;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
+       return err;
+}
+
 const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_drvinfo = mlx4_en_get_drvinfo,
        .get_settings = mlx4_en_get_settings,
@@ -1948,6 +1974,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_sset_count = mlx4_en_get_sset_count,
        .get_ethtool_stats = mlx4_en_get_ethtool_stats,
        .self_test = mlx4_en_self_test,
+       .set_phys_id = mlx4_en_set_phys_id,
        .get_wol = mlx4_en_get_wol,
        .set_wol = mlx4_en_set_wol,
        .get_msglevel = mlx4_en_get_msglevel,
index 58d5a07d0ff4da6397118fb4b3b051ac936ed95b..913b716ed2e141189a978af29ab5d54c1b606387 100644 (file)
@@ -103,6 +103,11 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
+       if (features & NETIF_F_LOOPBACK)
+               priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+       else
+               priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
        priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
                        MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
 
index 354e254b53cfcc8b1040e1bfb591b4445fd5e4cc..0f1afc085d580b34e0eda1eaa4b1cdc1737c71be 100644 (file)
@@ -2195,31 +2195,50 @@ static int mlx4_en_set_features(struct net_device *netdev,
                netdev_features_t features)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
+       bool reset = false;
        int ret = 0;
 
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
+               en_info(priv, "Turn %s RX-FCS\n",
+                       (features & NETIF_F_RXFCS) ? "ON" : "OFF");
+               reset = true;
+       }
+
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
+               u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
+
+               en_info(priv, "Turn %s RX-ALL\n",
+                       ignore_fcs_value ? "ON" : "OFF");
+               ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
+                                             priv->port, ignore_fcs_value);
+               if (ret)
+                       return ret;
+       }
+
        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
                en_info(priv, "Turn %s RX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
-               ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
-                                          features);
-               if (ret)
-                       return ret;
+               reset = true;
        }
 
        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
                en_info(priv, "Turn %s TX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
-       if (features & NETIF_F_LOOPBACK)
-               priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
-       else
-               priv->ctrl_flags &=
-                       cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
+               en_info(priv, "Turn %s loopback\n",
+                       (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
+               mlx4_en_update_loopback_state(netdev, features);
+       }
 
-       mlx4_en_update_loopback_state(netdev, features);
+       if (reset) {
+               ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+                                          features);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
-
 }
 
 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
@@ -2242,6 +2261,16 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
        return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
 }
 
+static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+                              int max_tx_rate)
+{
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = en_priv->mdev;
+
+       return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
+                               max_tx_rate);
+}
+
 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
 {
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
@@ -2460,6 +2489,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
        .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
        .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
+       .ndo_set_vf_rate        = mlx4_en_set_vf_rate,
        .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
        .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
        .ndo_get_vf_config      = mlx4_en_get_vf_config,
@@ -2805,7 +2835,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
-               if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+               if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
                        dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
                } else {
                        en_info(priv, "enabling only PFC DCB ops\n");
@@ -2892,6 +2922,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->hw_features |= NETIF_F_LOOPBACK |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+               dev->hw_features |= NETIF_F_RXFCS;
+
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
+               dev->hw_features |= NETIF_F_RXALL;
+
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED &&
            mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
@@ -2917,13 +2953,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_carrier_off(dev);
        mlx4_en_set_default_moderation(priv);
 
-       err = register_netdev(dev);
-       if (err) {
-               en_err(priv, "Netdev registration failed for port %d\n", port);
-               goto out;
-       }
-       priv->registered = 1;
-
        en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
@@ -2969,6 +2998,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                                 mdev->profile.prof[priv->port].tx_ppp,
                                 mdev->profile.prof[priv->port].tx_pause);
 
+       err = register_netdev(dev);
+       if (err) {
+               en_err(priv, "Netdev registration failed for port %d\n", port);
+               goto out;
+       }
+
+       priv->registered = 1;
+
        return 0;
 
 out:
@@ -2987,7 +3024,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 
        if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
            priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
-           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
+           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
                return 0; /* Nothing to change */
 
        if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -3026,6 +3064,13 @@ int mlx4_en_reset_config(struct net_device *dev,
                        dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
        }
 
+       if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
+               if (features & NETIF_F_RXFCS)
+                       dev->features |= NETIF_F_RXFCS;
+               else
+                       dev->features &= ~NETIF_F_RXFCS;
+       }
+
        /* RX vlan offload and RX time-stamping can't co-exist !
         * Regardless of the caller's choice,
         * Turn Off RX vlan offload in case of time-stamping is ON
index 698d60de1255269c11363c0196fd16800d5c4f13..4fdd3c37e47bf7c7862b9edf569be6f7f38e8dae 100644 (file)
@@ -771,7 +771,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                /*
                 * make sure we read the CQE after we read the ownership bit
                 */
-               rmb();
+               dma_rmb();
 
                /* Drop packet on bad receive or bad checksum */
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
@@ -1116,7 +1116,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
        /* Cancel FCS removal if FW allows */
        if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
                context->param3 |= cpu_to_be32(1 << 29);
-               ring->fcs_del = ETH_FCS_LEN;
+               if (priv->dev->features & NETIF_F_RXFCS)
+                       ring->fcs_del = 0;
+               else
+                       ring->fcs_del = ETH_FCS_LEN;
        } else
                ring->fcs_del = 0;
 
index 55f9f5c5344e19a3d8d76b083e0fd73d44b9870b..1783705273d89773c0a462cb28684f2969e55ac4 100644 (file)
@@ -416,7 +416,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
                 * make sure we read the CQE after we read the
                 * ownership bit
                 */
-               rmb();
+               dma_rmb();
 
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                             MLX4_CQE_OPCODE_ERROR)) {
@@ -667,7 +667,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
                                       skb_frag_size(&shinfo->frags[0]));
                }
 
-               wmb();
+               dma_wmb();
                inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
        }
 }
@@ -804,7 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
                        data->addr = cpu_to_be64(dma);
                        data->lkey = ring->mr_key;
-                       wmb();
+                       dma_wmb();
                        data->byte_count = cpu_to_be32(byte_count);
                        --data;
                }
@@ -821,7 +821,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
                        data->addr = cpu_to_be64(dma);
                        data->lkey = ring->mr_key;
-                       wmb();
+                       dma_wmb();
                        data->byte_count = cpu_to_be32(byte_count);
                }
                /* tx completion can avoid cache line miss for common cases */
@@ -938,7 +938,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                /* Ensure new descriptor hits memory
                 * before setting ownership of this descriptor to HW
                 */
-               wmb();
+               dma_wmb();
                tx_desc->ctrl.owner_opcode = op_own;
 
                wmb();
@@ -958,7 +958,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                /* Ensure new descriptor hits memory
                 * before setting ownership of this descriptor to HW
                 */
-               wmb();
+               dma_wmb();
                tx_desc->ctrl.owner_opcode = op_own;
                if (send_doorbell) {
                        wmb();
index 264bc15c1ff212ad649c3547c8a8ffc0b2c22e43..190fd624bdfebd6e7b5b9e83f8470b840f8d09cb 100644 (file)
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
 
                /* All active slaves need to receive the event */
                if (slave == ALL_SLAVES) {
-                       for (i = 0; i < dev->num_slaves; i++) {
-                               if (i != dev->caps.function &&
-                                   master->slave_state[i].active)
-                                       if (mlx4_GEN_EQE(dev, i, eqe))
-                                               mlx4_warn(dev, "Failed to generate event for slave %d\n",
-                                                         i);
+                       for (i = 0; i <= dev->persist->num_vfs; i++) {
+                               if (mlx4_GEN_EQE(dev, i, eqe))
+                                       mlx4_warn(dev, "Failed to generate event for slave %d\n",
+                                                 i);
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -190,7 +188,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
        memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
        s_eqe->slave_id = slave;
        /* ensure all information is written before setting the ownersip bit */
-       wmb();
+       dma_wmb();
        s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
        ++slave_eq->prod;
 
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
                             struct mlx4_eqe *eqe)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_slave_state *s_slave =
-               &priv->mfunc.master.slave_state[slave];
 
-       if (!s_slave->active) {
-               /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+       if (slave < 0 || slave > dev->persist->num_vfs ||
+           slave == dev->caps.function ||
+           !priv->mfunc.master.slave_state[slave].active)
                return;
-       }
 
        slave_event(dev, slave, eqe);
 }
@@ -477,7 +473,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
-               rmb();
+               dma_rmb();
 
                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
index 209a6171e59b2cf9248b073edf957eeb026aad73..b9881fc1252fab863cb184a633113ac7307a4467 100644 (file)
@@ -49,9 +49,9 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
-static bool enable_qos;
+static bool enable_qos = true;
 module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
 
 #define MLX4_GET(dest, source, offset)                               \
        do {                                                          \
@@ -105,6 +105,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
                [41] = "Unicast VEP steering support",
                [42] = "Multicast VEP steering support",
                [48] = "Counters support",
+               [52] = "RSS IP fragments support",
                [53] = "Port ETS Scheduler support",
                [55] = "Port link type sensing support",
                [59] = "Port management change event support",
@@ -146,7 +147,11 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [21] = "Port Remap support",
                [22] = "QCN support",
                [23] = "QP rate limiting support",
-               [24] = "Ethernet Flow control statistics support"
+               [24] = "Ethernet Flow control statistics support",
+               [25] = "Granular QoS per VF support",
+               [26] = "Port ETS Scheduler support",
+               [27] = "Port beacon support",
+               [28] = "RX-ALL support",
        };
        int i;
 
@@ -644,6 +649,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSS_OFFSET               0x2e
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET          0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET           0x33
+#define QUERY_DEV_CAP_PORT_BEACON_OFFSET       0x34
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET         0x35
 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET         0x36
 #define QUERY_DEV_CAP_VL_PORT_OFFSET           0x37
@@ -783,6 +789,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
        dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+       if (field & 0x80)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
@@ -870,6 +879,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
        dev_cap->max_rq_desc_sz = size;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+       if (field & (1 << 4))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
        if (field & (1 << 5))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
        if (field & (1 << 6))
@@ -883,6 +894,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
        if (field & 0x20)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
+       if (field & (1 << 2))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
        MLX4_GET(dev_cap->reserved_lkey, outbox,
                 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -896,6 +909,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
        if (field & 1<<3)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
+       if (field & (1 << 5))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
        MLX4_GET(dev_cap->max_icm_sz, outbox,
                 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
        if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1138,6 +1153,9 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        }
        for (; slave_port < dev->caps.num_ports; ++slave_port)
                flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
+
+       /* Not exposing RSS IP fragments to guests */
+       flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
        MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -1150,11 +1168,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
 
-       /* For guests, disable vxlan tunneling */
+       /* For guests, disable vxlan tunneling and QoS support */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
-       field &= 0xf7;
+       field &= 0xd7;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
 
+       /* For guests, disable port BEACON */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+       field &= 0x7f;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+
        /* For guests, report Blueflame disabled */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
        field &= 0x7f;
@@ -1195,6 +1218,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        field16 = 0;
        MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
 
+       /* turn off QoS per VF support for guests */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+       field &= 0xef;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+
+       /* turn off ignore FCS feature for guests */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+       field &= 0xfb;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+
        return 0;
 }
 
@@ -1694,13 +1727,17 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
 
        /* Enable QoS support if module parameter set */
-       if (enable_qos)
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
 
        /* enable counters */
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
 
+       /* Enable RSS spread to fragmented IP packets when supported */
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
+               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
+
        /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
                *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
@@ -1889,6 +1926,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
                else
                        param->steering_mode = MLX4_STEERING_MODE_A0;
        }
+
+       if (dword_field & (1 << 13))
+               param->rss_ip_frags = 1;
+
        /* steering attributes */
        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
index 863655bd3947015cd869707f1e65feda77a3e6cd..07cb7c2461adaa90cbfab5e478a6a82d14613f87 100644 (file)
@@ -203,6 +203,7 @@ struct mlx4_init_hca_param {
        u64 dev_cap_enabled;
        u16 cqe_size; /* For use only when CQE stride feature enabled */
        u16 eqe_size; /* For use only when EQE stride feature enabled */
+       u8 rss_ip_frags;
 };
 
 struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c
new file mode 100644 (file)
index 0000000..8f2fde0
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "fw_qos.h"
+#include "fw.h"
+
+enum {
+       /* allocate vpp opcode modifiers */
+       MLX4_ALLOCATE_VPP_ALLOCATE      = 0x0,
+       MLX4_ALLOCATE_VPP_QUERY         = 0x1
+};
+
+enum {
+       /* set vport qos opcode modifiers */
+       MLX4_SET_VPORT_QOS_SET          = 0x0,
+       MLX4_SET_VPORT_QOS_QUERY        = 0x1
+};
+
+struct mlx4_set_port_prio2tc_context {
+       u8 prio2tc[4];
+};
+
+struct mlx4_port_scheduler_tc_cfg_be {
+       __be16 pg;
+       __be16 bw_precentage;
+       __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
+       __be16 max_bw_value;
+};
+
+struct mlx4_set_port_scheduler_context {
+       struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
+};
+
+/* Granular QoS (per VF) section */
+struct mlx4_alloc_vpp_param {
+       __be32 availible_vpp;
+       __be32 vpp_p_up[MLX4_NUM_UP];
+};
+
+struct mlx4_prio_qos_param {
+       __be32 bw_share;
+       __be32 max_avg_bw;
+       __be32 reserved;
+       __be32 enable;
+       __be32 reserved1[4];
+};
+
+struct mlx4_set_vport_context {
+       __be32 reserved[8];
+       struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
+};
+
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_prio2tc_context *context;
+       int err;
+       u32 in_mod;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       context = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_UP; i += 2)
+               context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
+
+       in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
+
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+                           u8 *pg, u16 *ratelimit)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_scheduler_context *context;
+       int err;
+       u32 in_mod;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       context = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_TC; i++) {
+               struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
+               u16 r;
+
+               if (ratelimit && ratelimit[i]) {
+                       if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
+                               r = ratelimit[i];
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_100M_UNITS);
+                       } else {
+                               r = ratelimit[i] / 10;
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_1G_UNITS);
+                       }
+                       tc->max_bw_value = htons(r);
+               } else {
+                       tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
+                       tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
+               }
+
+               tc->pg = htons(pg[i]);
+               tc->bw_precentage = htons(tc_tx_bw[i]);
+       }
+
+       in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+
+int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
+                         u16 *availible_vpp, u8 *vpp_p_up)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_alloc_vpp_param *out_param;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       out_param = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
+                          MLX4_ALLOCATE_VPP_QUERY,
+                          MLX4_CMD_ALLOCATE_VPP,
+                          MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
+       if (err)
+               goto out;
+
+       /* Total number of supported VPPs */
+       *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp);
+
+       for (i = 0; i < MLX4_NUM_UP; i++)
+               vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);
+
+int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_alloc_vpp_param *in_param;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       in_param = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_UP; i++)
+               in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);
+
+       err = mlx4_cmd(dev, mailbox->dma, port,
+                      MLX4_ALLOCATE_VPP_ALLOCATE,
+                      MLX4_CMD_ALLOCATE_VPP,
+                      MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
+
+int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *out_param)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_vport_context *ctx;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       ctx = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
+                          MLX4_SET_VPORT_QOS_QUERY,
+                          MLX4_CMD_SET_VPORT_QOS,
+                          MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
+       if (err)
+               goto out;
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
+               out_param[i].max_avg_bw =
+                       be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
+               out_param[i].enable =
+                       !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31);
+       }
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);
+
+int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *in_param)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_vport_context *ctx;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       ctx = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
+               ctx->qos_p_up[i].max_avg_bw =
+                               cpu_to_be32(in_param[i].max_avg_bw);
+               ctx->qos_p_up[i].enable =
+                               cpu_to_be32(in_param[i].enable << 31);
+       }
+
+       err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
+                      MLX4_SET_VPORT_QOS_SET,
+                      MLX4_CMD_SET_VPORT_QOS,
+                      MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
new file mode 100644 (file)
index 0000000..ac1f331
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_FW_QOS_H
+#define MLX4_FW_QOS_H
+
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/device.h>
+
+#define MLX4_NUM_UP 8
+#define MLX4_NUM_TC 8
+
+/* Default supported priorities for VPP allocation */
+#define MLX4_DEFAULT_QOS_PRIO (0)
+
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
+#define MLX4_VPP_DEFAULT_VPORT (0)
+
+struct mlx4_vport_qos_param {
+       u32 bw_share;
+       u32 max_avg_bw;
+       u8 enable;
+};
+
+/**
+ * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic
+ * classes of a given port and device.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @prio2tc: Array of TCs associated with each priority.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
+
+/**
+ * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between
+ * traffic classes (ETS) and configured rate limit for traffic classes.
+ * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC.
+ * The description for those parameters below refers to a single TC.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @tc_tx_bw: The percentage of the bandwidth allocated for a traffic class
+ *  within a TC group. The sum of the bw_percentage of all the traffic
+ *  classes within a TC group must equal 100% for correct operation.
+ * @pg: The TC group the traffic class is associated with.
+ * @ratelimit: The maximal bandwidth allowed for use by this traffic class.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+                           u8 *pg, u16 *ratelimit);
+/**
+ * mlx4_ALLOCATE_VPP_get - Query port VPP available resources and allocation.
+ * Before distribution of VPPs to priorities, only availible_vpp is returned.
+ * After initialization it returns the distribution of VPPs among priorities.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @availible_vpp: Pointer to variable holding the number of available VPPs.
+ * @vpp_p_up: Distribution of VPPs to priorities is stored in this array.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
+                         u16 *availible_vpp, u8 *vpp_p_up);
+/**
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
+ * The total number of VPPs assigned to all priorities for a port must not
+ * exceed the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get.
+ * VPP allocation is allowed only after the port type has been set,
+ * and while no QPs are open for this port.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vpp_p_up: Allocation of VPPs to different priorities.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
+
+/**
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
+ * Each priority allowed for the Vport is assigned a share of the BW
+ * and a BW limitation. This command queries the current QoS values.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vport: Vport id.
+ * @out_param: Array of mlx4_vport_qos_param that will contain the values.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *out_param);
+
+/**
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
+ * QoS parameters can be modified at any time, but must be initialized
+ * before any QP is associated with the VPort.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vport: Vport id.
+ * @in_param: Array of mlx4_vport_qos_param which holds the requested values.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *in_param);
+
+#endif /* MLX4_FW_QOS_H */
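
As a usage illustration only (not part of the patch), a rough sketch of how the VPP allocation helpers declared above might hand all of a port's VPPs to the default priority, in the spirit of the mlx4_allocate_port_vpps() call made at master init; the function name is hypothetical and error handling is minimal:

	static int example_assign_vpps_to_default_prio(struct mlx4_dev *dev, u8 port)
	{
		u16 availible_vpp;
		u8 vpp_p_up[MLX4_NUM_UP] = { 0 };
		int err;

		/* Query how many VPPs the FW exposes for this port */
		err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_p_up);
		if (err)
			return err;

		/* Grant every queried VPP to the single default QoS priority */
		vpp_p_up[MLX4_DEFAULT_QOS_PRIO] = min_t(u16, availible_vpp, U8_MAX);

		return mlx4_ALLOCATE_VPP_set(dev, port, vpp_p_up);
	}
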
index 43aa76775b5f0909ee774b00eb7f8e14bf6571b2..acceb75e8c440c6aab8061cc1cdec7c0d420f4b1 100644 (file)
@@ -297,6 +297,25 @@ static int mlx4_dev_port(struct mlx4_dev *dev, int port,
        return err;
 }
 
+static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
+{
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
+               return;
+
+       if (mlx4_is_mfunc(dev)) {
+               mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
+               dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+               return;
+       }
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
+               mlx4_dbg(dev,
+                        "Keep FCS is not supported - Disabling Ignore FCS");
+               dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+               return;
+       }
+}
+
 #define MLX4_A0_STEERING_TABLE_SIZE    256
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
@@ -528,10 +547,20 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.alloc_res_qp_mask =
                        (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
                        MLX4_RESERVE_A0_QP;
+
+               if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
+                   dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+                       mlx4_warn(dev, "Old device ETS support detected\n");
+                       mlx4_warn(dev, "Consider upgrading device FW.\n");
+                       dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
+               }
+
        } else {
                dev->caps.alloc_res_qp_mask = 0;
        }
 
+       mlx4_enable_ignore_fcs(dev);
+
        return 0;
 }
 
@@ -885,6 +914,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
 
        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+       mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
+                hca_param.rss_ip_frags ? "on" : "off");
 
        if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
            dev->caps.bf_reg_size)
index 0b16db015745b33885ff24198bd933d22d547eae..f30eeb730a8667d44bead81f19c606d6770d2bb9 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
 #include <linux/mlx4/cmd.h>
+#include "fw_qos.h"
 
 #define DRV_NAME       "mlx4_core"
 #define PFX            DRV_NAME ": "
 
 #define INIT_HCA_TPT_MW_ENABLE          (1 << 7)
 
-struct mlx4_set_port_prio2tc_context {
-       u8 prio2tc[4];
-};
-
-struct mlx4_port_scheduler_tc_cfg_be {
-       __be16 pg;
-       __be16 bw_precentage;
-       __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
-       __be16 max_bw_value;
-};
-
-struct mlx4_set_port_scheduler_context {
-       struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
-};
-
 enum {
        MLX4_HCR_BASE           = 0x80680,
        MLX4_HCR_SIZE           = 0x0001c,
@@ -512,6 +498,7 @@ struct mlx4_vport_state {
        u32 tx_rate;
        bool spoofchk;
        u32 link_state;
+       u8 qos_vport;
 };
 
 struct mlx4_vf_admin_state {
@@ -568,6 +555,11 @@ struct mlx4_slave_event_eq {
        struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
 };
 
+struct mlx4_qos_manager {
+       int num_of_qos_vfs;
+       DECLARE_BITMAP(priority_bm, MLX4_NUM_UP);
+};
+
 struct mlx4_master_qp0_state {
        int proxy_qp0_active;
        int qp0_active;
@@ -592,6 +584,7 @@ struct mlx4_mfunc_master_ctx {
        struct mlx4_eqe         cmd_eqe;
        struct mlx4_slave_event_eq slave_eq;
        struct mutex            gen_eqe_mutex[MLX4_MFUNC_MAX];
+       struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_mfunc {
@@ -644,6 +637,7 @@ struct mlx4_vf_immed_vlan_work {
        int                     orig_vlan_ix;
        u8                      port;
        u8                      qos;
+       u8                      qos_vport;
        u16                     vlan_id;
        u16                     orig_vlan_id;
 };
@@ -769,9 +763,11 @@ enum {
 
 
 struct mlx4_set_port_general_context {
-       u8 reserved[3];
+       u16 reserved1;
+       u8 v_ignore_fcs;
        u8 flags;
-       u16 reserved2;
+       u8 ignore_fcs;
+       u8 reserved2;
        __be16 mtu;
        u8 pptx;
        u8 pfctx;
index 67eeea244eff50d515c876494e4fe2bd45d174f0..9de30216b146bb09188a6867307b5bbcf7aa9dd0 100644 (file)
@@ -482,6 +482,7 @@ enum {
        MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP        = (1 << 5),
 };
 
+#define PORT_BEACON_MAX_LIMIT (65535)
 #define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
 #define MLX4_EN_MAC_HASH_IDX 5
 
index b97f173ab06251774dcdbd3718a494b4e0e73269..c2b21313dba7f64d0e51cff8ca3c601720a848ef 100644 (file)
 #define MLX4_VLAN_VALID                (1u << 31)
 #define MLX4_VLAN_MASK         0xfff
 
+#define MLX4_STATS_TRAFFIC_COUNTERS_MASK       0xfULL
+#define MLX4_STATS_TRAFFIC_DROPS_MASK          0xc0ULL
+#define MLX4_STATS_ERROR_COUNTERS_MASK         0x1ffc30ULL
+#define MLX4_STATS_PORT_COUNTERS_MASK          0x1fe00000ULL
+
+#define MLX4_FLAG_V_IGNORE_FCS_MASK            0x2
+#define MLX4_IGNORE_FCS_MASK                   0x1
+
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
        int i;
@@ -123,8 +131,9 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
 
        in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
 
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
@@ -337,8 +346,9 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
 
        memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
        in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -625,9 +635,9 @@ static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
                       MLX4_ROCE_GID_ENTRY_SIZE);
 
        err = mlx4_cmd(dev, mailbox->dma,
-                      ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
-                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-                      MLX4_CMD_NATIVE);
+                      ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
+                      MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
        mutex_unlock(&(priv->port[port].gid_table.mutex));
        return err;
 }
@@ -833,6 +843,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                                MLX4_CMD_NATIVE);
        }
 
+       /* Slaves are not allowed to SET_PORT beacon (LED) blink */
+       if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
+               mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
+               return -EPERM;
+       }
+
        /* For IB, we only consider:
         * - The capability mask, which is set to the aggregate of all
         *   slave function capabilities
@@ -941,8 +957,9 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
                        (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
                        (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
                        (vl_cap << MLX4_SET_PORT_VL_CAP));
-               err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                               MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+               err = mlx4_cmd(dev, mailbox->dma, port,
+                              MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
+                              MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
                if (err != -ENOMEM)
                        break;
        }
@@ -971,8 +988,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
        context->pfcrx = pfcrx;
 
        in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
@@ -1008,84 +1026,40 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
        context->vlan_miss = MLX4_VLAN_MISS_IDX;
 
        in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
 
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
 {
        struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_prio2tc_context *context;
-       int err;
+       struct mlx4_set_port_general_context *context;
        u32 in_mod;
-       int i;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       for (i = 0; i < MLX4_NUM_UP; i += 2)
-               context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
-
-       in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
-
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
-               u8 *pg, u16 *ratelimit)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_scheduler_context *context;
        int err;
-       u32 in_mod;
-       int i;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        context = mailbox->buf;
+       context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
+       if (ignore_fcs_value)
+               context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
+       else
+               context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
 
-       for (i = 0; i < MLX4_NUM_TC; i++) {
-               struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
-               u16 r;
-
-               if (ratelimit && ratelimit[i]) {
-                       if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
-                               r = ratelimit[i];
-                               tc->max_bw_units =
-                                       htons(MLX4_RATELIMIT_100M_UNITS);
-                       } else {
-                               r = ratelimit[i]/10;
-                               tc->max_bw_units =
-                                       htons(MLX4_RATELIMIT_1G_UNITS);
-                       }
-                       tc->max_bw_value = htons(r);
-               } else {
-                       tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
-                       tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
-               }
-
-               tc->pg = htons(pg[i]);
-               tc->bw_precentage = htons(tc_tx_bw[i]);
-       }
-
-       in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
        err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
-EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
 
 enum {
        VXLAN_ENABLE_MODIFY     = 1 << 7,
@@ -1121,14 +1095,35 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
        context->steering  = steering;
 
        in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
 
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
+{
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       *((__be32 *)mailbox->buf) = cpu_to_be32(time);
+
+       err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
index 69e4462e4ee41b18431009a591b21a2e3582f497..b75214a80d0e5be03ccc4bb9e52049ea25e225f6 100644 (file)
@@ -447,6 +447,11 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
        }
 
+       if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+               qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
+               cmd->qp_context.qos_vport = params->qos_vport;
+       }
+
        cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
        cmd->qp_mask = cpu_to_be64(qp_mask);
 
index c258f8625aac7ab83e2f78a58a15346ff96ef5d9..c7f28bf4b8e21436cc927c8212c5cc6b57706e51 100644 (file)
@@ -221,11 +221,6 @@ struct res_fs_rule {
        int                     qpn;
 };
 
-static int mlx4_is_eth(struct mlx4_dev *dev, int port)
-{
-       return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
-}
-
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
 {
        struct rb_node *node = root->rb_node;
@@ -770,6 +765,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
+               qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
@@ -3099,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
        if (!priv->mfunc.master.slave_state)
                return -EINVAL;
 
+       /* check for slave valid, slave not PF, and slave active */
+       if (slave < 0 || slave > dev->persist->num_vfs ||
+           slave == dev->caps.function ||
+           !priv->mfunc.master.slave_state[slave].active)
+               return 0;
+
        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
 
        /* Create the event only if the slave is registered */
@@ -4916,6 +4918,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
+                               upd_context->qp_mask |=
+                                       cpu_to_be64(1ULL <<
+                                                   MLX4_UPD_QP_MASK_QOS_VPP);
+                               upd_context->qp_context.qos_vport =
+                                       work->qos_vport;
                        }
 
                        err = mlx4_cmd(dev, mailbox->dma,
index 201ca6d76ce563862b6f2d620b68c69e3e58002a..ac0f7bf4be958bef168c0281f05108f6287304f4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -171,6 +171,9 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;
 
+       db->db[0] = 0;
+       db->db[1] = 0;
+
        return 0;
 }
 
index a2853057c779529b0a226e5aa54adfbc10c3a645..e3273faf4568945cb494e6598dbc013e61b11919 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -125,7 +125,10 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
        u8 token;
 
        spin_lock(&cmd->token_lock);
-       token = cmd->token++ % 255 + 1;
+       cmd->token++;
+       if (cmd->token == 0)
+               cmd->token++;
+       token = cmd->token;
        spin_unlock(&cmd->token_lock);
 
        return token;
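
The token change above is easier to see in isolation: instead of deriving the token from `cmd->token++ % 255 + 1`, the counter now wraps naturally as a u8 and simply skips zero, so issued values still cycle through 1..255. A stand-alone sketch of that behaviour (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the new token generator: a u8 counter that
 * wraps naturally but never hands out 0, so tokens cycle 1..255. */
static uint8_t next_token(uint8_t *counter)
{
        (*counter)++;
        if (*counter == 0)
                (*counter)++;
        return *counter;
}

int main(void)
{
        uint8_t counter = 253;
        int i;

        /* Around the wrap point the sequence is 254, 255, 1, 2, 3 */
        for (i = 0; i < 5; i++)
                printf("%u\n", next_token(&counter));
        return 0;
}
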
@@ -515,10 +518,11 @@ static void cmd_work_handler(struct work_struct *work)
        ent->ts1 = ktime_get_ns();
 
        /* ring doorbell after the descriptor is valid */
+       mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
-       mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
        mmiowb();
+       /* if not in polling mode, don't use ent after this point */
        if (cmd->mode == CMD_MODE_POLLING) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
@@ -1236,7 +1240,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                goto out_out;
        }
 
-       err = mlx5_copy_from_msg(out, outb, out_size);
+       if (!callback)
+               err = mlx5_copy_from_msg(out, outb, out_size);
 
 out_out:
        if (!callback)
@@ -1319,6 +1324,45 @@ ex_err:
        return err;
 }
 
+static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+       struct device *ddev = &dev->pdev->dev;
+
+       cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+                                                &cmd->alloc_dma, GFP_KERNEL);
+       if (!cmd->cmd_alloc_buf)
+               return -ENOMEM;
+
+       /* make sure it is aligned to 4K */
+       if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
+               cmd->cmd_buf = cmd->cmd_alloc_buf;
+               cmd->dma = cmd->alloc_dma;
+               cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
+               return 0;
+       }
+
+       dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+                         cmd->alloc_dma);
+       cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
+                                                2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+                                                &cmd->alloc_dma, GFP_KERNEL);
+       if (!cmd->cmd_alloc_buf)
+               return -ENOMEM;
+
+       cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
+       cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
+       cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
+       return 0;
+}
+
+static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+       struct device *ddev = &dev->pdev->dev;
+
+       dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+                         cmd->alloc_dma);
+}
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev)
 {
        int size = sizeof(struct mlx5_cmd_prot_block);
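
The new alloc_cmd_page() first tries a single MLX5_ADAPTER_PAGE_SIZE coherent allocation and keeps it only if it already sits on a 4K boundary; otherwise it over-allocates 2 * PAGE - 1 bytes so that an aligned window is guaranteed to fit, then rounds both the CPU pointer and the DMA address up. The alignment arithmetic on its own, as a user-space sketch (names and the printout are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ADAPTER_PAGE_SIZE 4096UL        /* stand-in for MLX5_ADAPTER_PAGE_SIZE */

/* Round an address up to the next 4K boundary, the same arithmetic
 * PTR_ALIGN()/ALIGN() perform in the fallback path above. */
static uintptr_t align_up(uintptr_t addr, uintptr_t a)
{
        return (addr + a - 1) & ~(a - 1);
}

int main(void)
{
        /* Worst case: over-allocate 2 * PAGE - 1 bytes so an aligned 4K
         * window always fits inside the buffer, wherever it starts. */
        size_t len = 2 * ADAPTER_PAGE_SIZE - 1;
        void *raw = malloc(len);
        uintptr_t aligned = align_up((uintptr_t)raw, ADAPTER_PAGE_SIZE);

        printf("raw=%p aligned=%#lx offset=%lu\n",
               raw, (unsigned long)aligned,
               (unsigned long)(aligned - (uintptr_t)raw));
        free(raw);
        return 0;
}
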
@@ -1341,17 +1385,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (!cmd->pool)
                return -ENOMEM;
 
-       cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
-       if (!cmd->cmd_buf) {
-               err = -ENOMEM;
+       err = alloc_cmd_page(dev, cmd);
+       if (err)
                goto err_free_pool;
-       }
-       cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
-                                 DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
-               err = -ENOMEM;
-               goto err_free;
-       }
 
        cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
        cmd->log_sz = cmd_l >> 4 & 0xf;
@@ -1360,13 +1396,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
                        1 << cmd->log_sz);
                err = -EINVAL;
-               goto err_map;
+               goto err_free_page;
        }
 
        if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
                dev_err(&dev->pdev->dev, "command queue size overflow\n");
                err = -EINVAL;
-               goto err_map;
+               goto err_free_page;
        }
 
        cmd->checksum_disabled = 1;
@@ -1378,7 +1414,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                        CMD_IF_REV, cmd->cmdif_rev);
                err = -ENOTSUPP;
-               goto err_map;
+               goto err_free_page;
        }
 
        spin_lock_init(&cmd->alloc_lock);
@@ -1394,7 +1430,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (cmd_l & 0xfff) {
                dev_err(&dev->pdev->dev, "invalid command queue address\n");
                err = -ENOMEM;
-               goto err_map;
+               goto err_free_page;
        }
 
        iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
@@ -1410,7 +1446,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        err = create_msg_cache(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "failed to create command cache\n");
-               goto err_map;
+               goto err_free_page;
        }
 
        set_wqname(dev);
@@ -1435,11 +1471,8 @@ err_wq:
 err_cache:
        destroy_msg_cache(dev);
 
-err_map:
-       dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
-                        DMA_BIDIRECTIONAL);
-err_free:
-       free_pages((unsigned long)cmd->cmd_buf, 0);
+err_free_page:
+       free_cmd_page(dev, cmd);
 
 err_free_pool:
        pci_pool_destroy(cmd->pool);
@@ -1455,9 +1488,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
        clean_debug_files(dev);
        destroy_workqueue(cmd->wq);
        destroy_msg_cache(dev);
-       dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
-                        DMA_BIDIRECTIONAL);
-       free_pages((unsigned long)cmd->cmd_buf, 0);
+       free_cmd_page(dev, cmd);
        pci_pool_destroy(cmd->pool);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup);
index 43c5f48095260b5966d7e7dd7b6bf654bf5249e3..eb0cf81f5f4518a06579a6c52e191b72ad1d0e50 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 4878025e231c6b4ebb9f63c72ec3fcdf1402150b..5210d92e6bc7252a4989082e17ac1953490b4af9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index da82991239a8cb0df3f74e1a14d36037ed2d9a18..58800e4f39585c2fd30d76e8de4a21bfb6f8bf66 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -208,7 +208,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
-               rmb();
+               dma_rmb();
 
                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                              eq->eqn, eqe_type_str(eqe->type));
index 06f9036acd836196424b294ea47ce4dc624cbe09..4b4cda3bcc5fa1eecf99b6ed5265d41cc40dfedc 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 3e6670c4a7cd215998bf2302f2573acb8540ffb7..292d76f2a9041105bceb9ae0a792fe4eb05c53d2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index fd80ecfa7195195464b8ccd2f0cfc1bc556d96c2..ee1b0b965f34a3f4e29a71c79daf40e47693d67c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 350c6297fe5dff8570b241b1a49de9673af53a38..28425e5ea91f871670e84721bb865c1725472e80 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 #include "mlx5_core.h"
 
 #define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#define DRIVER_VERSION "3.0"
+#define DRIVER_RELDATE  "January 2015"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
+MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -288,8 +288,6 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from)
        MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
        MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
        MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
        MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
        MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
        v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
@@ -509,6 +507,87 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
        return 0;
 }
 
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_eq *eq, *n;
+       int err = -ENOENT;
+
+       spin_lock(&table->lock);
+       list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+               if (eq->index == vector) {
+                       *eqn = eq->eqn;
+                       *irqn = eq->irqn;
+                       err = 0;
+                       break;
+               }
+       }
+       spin_unlock(&table->lock);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
+
+static void free_comp_eqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_eq *eq, *n;
+
+       spin_lock(&table->lock);
+       list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+               list_del(&eq->list);
+               spin_unlock(&table->lock);
+               if (mlx5_destroy_unmap_eq(dev, eq))
+                       mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
+                                      eq->eqn);
+               kfree(eq);
+               spin_lock(&table->lock);
+       }
+       spin_unlock(&table->lock);
+}
+
+static int alloc_comp_eqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       char name[MLX5_MAX_EQ_NAME];
+       struct mlx5_eq *eq;
+       int ncomp_vec;
+       int nent;
+       int err;
+       int i;
+
+       INIT_LIST_HEAD(&table->comp_eqs_list);
+       ncomp_vec = table->num_comp_vectors;
+       nent = MLX5_COMP_EQ_SIZE;
+       for (i = 0; i < ncomp_vec; i++) {
+               eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+               if (!eq) {
+                       err = -ENOMEM;
+                       goto clean;
+               }
+
+               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               err = mlx5_create_map_eq(dev, eq,
+                                        i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
+                                        name, &dev->priv.uuari.uars[0]);
+               if (err) {
+                       kfree(eq);
+                       goto clean;
+               }
+               mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
+               eq->index = i;
+               spin_lock(&table->lock);
+               list_add_tail(&eq->list, &table->comp_eqs_list);
+               spin_unlock(&table->lock);
+       }
+
+       return 0;
+
+clean:
+       free_comp_eqs(dev);
+       return err;
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -645,6 +724,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_free_uar;
        }
 
+       err = alloc_comp_eqs(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
+               goto err_stop_eqs;
+       }
+
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
        mlx5_init_cq_table(dev);
@@ -654,6 +739,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        return 0;
 
+err_stop_eqs:
+       mlx5_stop_eqs(dev);
+
 err_free_uar:
        mlx5_free_uuars(dev, &priv->uuari);
 
@@ -705,6 +793,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
+       free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_eq_cleanup(dev);
@@ -819,6 +908,28 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+       struct mlx5_priv *priv = &mdev->priv;
+       struct mlx5_device_context *dev_ctx;
+       unsigned long flags;
+       void *result = NULL;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+               if ((dev_ctx->intf->protocol == protocol) &&
+                   dev_ctx->intf->get_dev) {
+                       result = dev_ctx->intf->get_dev(dev_ctx->context);
+                       break;
+               }
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
 static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                            unsigned long param)
 {
index 44837640bd7ca45c1380d6d3fe1f4bc9ecceee51..d79fd85d1dd50c6e991eb9659d426839e4e013a8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index f0c9f9a7a36142f1a7fded7a88120e1cff213aaa..a051b906afdf1a3fb8059c9567fe8378b4818cf9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 184c3615f4799bbda0adc0da0e265cacc6ae8bd6..1adb300dd850691eaafbcf5c5f68212090e585cd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(mlx5_core_destroy_mkey);
 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                         struct mlx5_query_mkey_mbox_out *out, int outlen)
 {
-       struct mlx5_destroy_mkey_mbox_in in;
+       struct mlx5_query_mkey_mbox_in in;
        int err;
 
        memset(&in, 0, sizeof(in));
index 4fdaae9b54d99f56f21a78706776805303ba5ed9..df2238372ea73a0d71b39450cd816810bbcdd1ad 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -243,8 +243,9 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
        struct page *page;
        u64 addr;
        int err;
+       int nid = dev_to_node(&dev->pdev->dev);
 
-       page = alloc_page(GFP_HIGHUSER);
+       page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
index 790da5c4ca4f4ab0c5b47d036943afdd9d9aafbf..f2d3aee909e8be2051c8d7f8207c50856b93e394 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 72c2d002c3b8f65e78b89733c87c44d3ba702089..49e90f2612d8c0b8803e635acfb4c92b51044c8a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 575d853dbe05d1e9e8e3255761a673ff72dc5c30..dc7dbf7e9d98f28d83d55b6632ec44fe4275d9ce 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 38bce93f8314f202145277ed749d09f07f352dfb..f9d25dcd03c1e2616be6434cae3a9146d1e83df5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 06801d6f595ef99f0d18cc91d37dcac62be445db..5a89bb1d678a8e5ae6002a6ec9122bbd97d19085 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 092dcae0d4a969523e7cd99dfd3380398c3e7c8c..1e0f72b65459027059cd85a3cece83ae6c24a1be 100644 (file)
@@ -2520,7 +2520,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
                        DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
                                  ring->dev->name);
                        if (first_rxdp) {
-                               wmb();
+                               dma_wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        swstats->mem_alloc_fail_cnt++;
@@ -2634,7 +2634,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
                rxdp->Control_2 |= SET_RXD_MARKER;
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
                        if (first_rxdp) {
-                               wmb();
+                               dma_wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        first_rxdp = rxdp;
@@ -2649,7 +2649,7 @@ end:
         * and other fields are seen by adapter correctly.
         */
        if (first_rxdp) {
-               wmb();
+               dma_wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
        }
 
@@ -6950,7 +6950,7 @@ static  int rxd_owner_bit_reset(struct s2io_nic *sp)
                                }
 
                                set_rxd_buffer_size(sp, rxdp, size);
-                               wmb();
+                               dma_wmb();
                                /* flip the Ownership bit to Hardware */
                                rxdp->Control_1 |= RXD_OWN_XENA;
                        }
index b07d552a27d4d486079b329b238235411a103e4e..be916eb2f2e7304dbbfa35d8df22b618298ca3bc 100644 (file)
 
 #include "vxge-ethtool.h"
 
+static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
+       {"\n DRIVER STATISTICS"},
+       {"vpaths_opened"},
+       {"vpath_open_fail_cnt"},
+       {"link_up_cnt"},
+       {"link_down_cnt"},
+       {"tx_frms"},
+       {"tx_errors"},
+       {"tx_bytes"},
+       {"txd_not_free"},
+       {"txd_out_of_desc"},
+       {"rx_frms"},
+       {"rx_errors"},
+       {"rx_bytes"},
+       {"rx_mcast"},
+       {"pci_map_fail_cnt"},
+       {"skb_alloc_fail_cnt"}
+};
+
 /**
  * vxge_ethtool_sset - Sets different link parameters.
  * @dev: device pointer.
index 6cf3044d7f438283d6f6455aba047322a151544d..065a2c0429a404aabfc0f8205c2ec7c13ca26d84 100644 (file)
 /* Ethtool related variables and Macros. */
 static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
 
-static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
-       {"\n DRIVER STATISTICS"},
-       {"vpaths_opened"},
-       {"vpath_open_fail_cnt"},
-       {"link_up_cnt"},
-       {"link_down_cnt"},
-       {"tx_frms"},
-       {"tx_errors"},
-       {"tx_bytes"},
-       {"txd_not_free"},
-       {"txd_out_of_desc"},
-       {"rx_frms"},
-       {"rx_errors"},
-       {"rx_bytes"},
-       {"rx_mcast"},
-       {"pci_map_fail_cnt"},
-       {"skb_alloc_fail_cnt"}
-};
-
 #define VXGE_TITLE_LEN                 5
 #define VXGE_HW_VPATH_STATS_LEN        27
 #define VXGE_HW_AGGR_STATS_LEN         13
index c9558e6d57ad6765dd385dfc8c47c2474dc08403..a87b177bd7234a2610b7183a0a221241575a35b1 100644 (file)
@@ -4937,10 +4937,16 @@ static int rocker_port_master_changed(struct net_device *dev)
        struct net_device *master = netdev_master_upper_dev_get(dev);
        int err = 0;
 
+       /* There are currently three cases handled here:
+        * 1. Joining a bridge
+        * 2. Leaving a previously joined bridge
+        * 3. Other, e.g. being added to or removed from a bond or openvswitch,
+        *    in which case nothing is done
+        */
        if (master && master->rtnl_link_ops &&
            !strcmp(master->rtnl_link_ops->kind, "bridge"))
                err = rocker_port_bridge_join(rocker_port, master);
-       else
+       else if (rocker_port_is_bridged(rocker_port))
                err = rocker_port_bridge_leave(rocker_port);
 
        return err;
index c0a39198337268d88b8996f9607794c8a3f4f14b..2ac9552d1fa385953e261ff3797c74b8d5ad4add 100644 (file)
@@ -97,6 +97,7 @@ struct stmmac_priv {
        int wolopts;
        int wol_irq;
        struct clk *stmmac_clk;
+       struct clk *pclk;
        struct reset_control *stmmac_rst;
        int clk_csr;
        struct timer_list eee_ctrl_timer;
index 5336594abed1c373259b67f1a6cb8ef1866fc92d..06103cad7c77cb4e6c530752b70b808b5e993984 100644 (file)
@@ -2849,6 +2849,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
        }
        clk_prepare_enable(priv->stmmac_clk);
 
+       priv->pclk = devm_clk_get(priv->device, "pclk");
+       if (IS_ERR(priv->pclk)) {
+               if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto error_pclk_get;
+               }
+               priv->pclk = NULL;
+       }
+       clk_prepare_enable(priv->pclk);
+
        priv->stmmac_rst = devm_reset_control_get(priv->device,
                                                  STMMAC_RESOURCE_NAME);
        if (IS_ERR(priv->stmmac_rst)) {
@@ -2934,6 +2944,8 @@ error_mdio_register:
 error_netdev_register:
        netif_napi_del(&priv->napi);
 error_hw_init:
+       clk_disable_unprepare(priv->pclk);
+error_pclk_get:
        clk_disable_unprepare(priv->stmmac_clk);
 error_clk_get:
        free_netdev(ndev);
@@ -2965,6 +2977,7 @@ int stmmac_dvr_remove(struct net_device *ndev)
        unregister_netdev(ndev);
        if (priv->stmmac_rst)
                reset_control_assert(priv->stmmac_rst);
+       clk_disable_unprepare(priv->pclk);
        clk_disable_unprepare(priv->stmmac_clk);
        free_netdev(ndev);
 
@@ -3011,6 +3024,7 @@ int stmmac_suspend(struct net_device *ndev)
                stmmac_set_mac(priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable the clock in case PWM is off */
+               clk_disable(priv->pclk);
                clk_disable(priv->stmmac_clk);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -3051,6 +3065,7 @@ int stmmac_resume(struct net_device *ndev)
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clk previously disabled */
                clk_enable(priv->stmmac_clk);
+               clk_enable(priv->pclk);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
index 74e9b148378c1b1e1551976f1bfe4aa7c7920abe..e23a642357e7c01d7010ae070b47632b3a926ba9 100644 (file)
@@ -718,7 +718,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
        cluster_start = curr = (gp->rx_new & ~(4 - 1));
        count = 0;
        kick = -1;
-       wmb();
+       dma_wmb();
        while (curr != limit) {
                curr = NEXT_RX(curr);
                if (++count == 4) {
@@ -1038,7 +1038,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
                if (gem_intme(entry))
                        ctrl |= TXDCTRL_INTME;
                txd->buffer = cpu_to_le64(mapping);
-               wmb();
+               dma_wmb();
                txd->control_word = cpu_to_le64(ctrl);
                entry = NEXT_TX(entry);
        } else {
@@ -1076,7 +1076,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 
                        txd = &gp->init_block->txd[entry];
                        txd->buffer = cpu_to_le64(mapping);
-                       wmb();
+                       dma_wmb();
                        txd->control_word = cpu_to_le64(this_ctrl | len);
 
                        if (gem_intme(entry))
@@ -1086,7 +1086,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
                }
                txd = &gp->init_block->txd[first_entry];
                txd->buffer = cpu_to_le64(first_mapping);
-               wmb();
+               dma_wmb();
                txd->control_word =
                        cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
        }
@@ -1585,7 +1585,7 @@ static void gem_clean_rings(struct gem *gp)
                        gp->rx_skbs[i] = NULL;
                }
                rxd->status_word = 0;
-               wmb();
+               dma_wmb();
                rxd->buffer = 0;
        }
 
@@ -1647,7 +1647,7 @@ static void gem_init_rings(struct gem *gp)
                                        RX_BUF_ALLOC_SIZE(gp),
                                        PCI_DMA_FROMDEVICE);
                rxd->buffer = cpu_to_le64(dma_addr);
-               wmb();
+               dma_wmb();
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
                skb_reserve(skb, RX_OFFSET);
        }
@@ -1656,7 +1656,7 @@ static void gem_init_rings(struct gem *gp)
                struct gem_txd *txd = &gb->txd[i];
 
                txd->control_word = 0;
-               wmb();
+               dma_wmb();
                txd->buffer = 0;
        }
        wmb();
index 7a8ca2c7b7df3b3d4c82828d5c820f7ed2a6f996..cf4dcff051d5b9d3fb3edf20552501cc44c72a5b 100644 (file)
@@ -196,14 +196,14 @@ static u32 sbus_hme_read32(void __iomem *reg)
 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
        rxd->rx_addr = (__force hme32)addr;
-       wmb();
+       dma_wmb();
        rxd->rx_flags = (__force hme32)flags;
 }
 
 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
        txd->tx_addr = (__force hme32)addr;
-       wmb();
+       dma_wmb();
        txd->tx_flags = (__force hme32)flags;
 }
 
@@ -225,14 +225,14 @@ static u32 pci_hme_read32(void __iomem *reg)
 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
        rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
-       wmb();
+       dma_wmb();
        rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
        txd->tx_addr = (__force hme32)cpu_to_le32(addr);
-       wmb();
+       dma_wmb();
        txd->tx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
@@ -268,12 +268,12 @@ static u32 pci_hme_read_desc32(hme32 *p)
        sbus_readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {   (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {   (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_read_desc32(__hp, __p)     ((__force u32)(hme32)*(__p))
@@ -293,12 +293,12 @@ do {      (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
        readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {   (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {   (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
index 401abf7254d33bfb0e81e12dc1eeaf4abbf32b51..53fe200e0b7949b810071c23a08af49167ccdf63 100644 (file)
@@ -519,7 +519,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        if (desc->hdr.state != VIO_DESC_READY)
                return 1;
 
-       rmb();
+       dma_rmb();
 
        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
@@ -1380,7 +1380,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
         */
-       wmb();
+       dma_wmb();
 
        d->hdr.state = VIO_DESC_READY;
 
@@ -1395,7 +1395,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * is marked READY, but start_cons was false.
         * If so, vnet_ack() should send out the missed "start" trigger.
         *
-        * Note that the wmb() above makes sure the cookies et al. are
+        * Note that the dma_wmb() above makes sure the cookies et al. are
         * not globally visible before the VIO_DESC_READY, and that the
         * stores are ordered correctly by the compiler. The consumer will
         * not proceed until the VIO_DESC_READY is visible assuring that
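
The wmb()/rmb() to dma_wmb()/dma_rmb() conversions in this series (s2io, sungem, sunhme, the mlx5 EQ poll and the vnet ring here) all guard the same producer/consumer idiom on coherent descriptor memory: fill the descriptor, then publish the READY/ownership bit, and never read the payload before observing READY. The kernel barriers cannot be exercised from user space, so the sketch below models the ordering with C11 release/acquire as a stand-in; it illustrates the pattern, not the driver code:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative descriptor-publish pattern only; in the kernel the
 * ordering comes from dma_wmb()/dma_rmb(), modelled here with
 * release/acquire on the ownership word. */
struct desc {
        uint64_t addr;                  /* payload the device/peer reads */
        _Atomic uint32_t state;         /* 0 = free, 1 = READY */
};

static void publish(struct desc *d, uint64_t addr)
{
        d->addr = addr;                 /* fill the descriptor first */
        /* release: the addr store is visible before state == READY */
        atomic_store_explicit(&d->state, 1, memory_order_release);
}

static int consume(struct desc *d, uint64_t *addr)
{
        /* acquire: only read addr after observing READY */
        if (atomic_load_explicit(&d->state, memory_order_acquire) != 1)
                return 0;
        *addr = d->addr;
        return 1;
}

int main(void)
{
        struct desc d = { 0 };
        uint64_t a;

        publish(&d, 0x1000);
        return consume(&d, &a) ? 0 : 1;
}
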
index 309adee6e7910db99cfe2cd08d332c24033c1d15..f0b8b3e0ed7cdf8387d010b4967ca9b172e2c011 100644 (file)
@@ -130,7 +130,6 @@ struct hv_netvsc_packet {
        u32 status;
        bool part_of_skb;
 
-       struct hv_device *device;
        bool is_data_pkt;
        bool xmit_more; /* from skb */
        u16 vlan_tci;
@@ -189,6 +188,7 @@ int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet);
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                struct rndis_message *resp);
+void netvsc_xmit_completion(void *context);
 int netvsc_recv_callback(struct hv_device *device_obj,
                        struct hv_netvsc_packet *packet,
                        struct ndis_tcp_ip_checksum_info *csum_info);
@@ -959,6 +959,10 @@ struct ndis_tcp_lso_info {
 #define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
                sizeof(u32))
 
+/* Total size of all PPI data */
+#define NDIS_ALL_PPI_SIZE (NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE + \
+                          NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE)
+
 /* Format of Information buffer passed in a SetRequest for the OID */
 /* OID_GEN_RNDIS_CONFIG_PARAMETER. */
 struct rndis_config_parameter_info {
@@ -1171,6 +1175,8 @@ struct rndis_message {
 #define RNDIS_HEADER_SIZE      (sizeof(struct rndis_message) - \
                                 sizeof(union rndis_message_container))
 
+#define RNDIS_AND_PPI_SIZE (sizeof(struct rndis_message) + NDIS_ALL_PPI_SIZE)
+
 #define NDIS_PACKET_TYPE_DIRECTED      0x00000001
 #define NDIS_PACKET_TYPE_MULTICAST     0x00000002
 #define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
index f69923695b5b1b6fbb886f523203ba86656ac5f6..4d4d497d5762896d037f7c8e447451f24dac824e 100644 (file)
@@ -878,7 +878,9 @@ int netvsc_send(struct hv_device *device,
                packet->send_buf_index = section_index;
                packet->total_data_buflen += msd_len;
 
-               kfree(msdp->pkt);
+               if (msdp->pkt)
+                       netvsc_xmit_completion(msdp->pkt);
+
                if (packet->xmit_more) {
                        msdp->pkt = packet;
                        msdp->count++;
@@ -902,7 +904,7 @@ int netvsc_send(struct hv_device *device,
                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
                                              msd_send->send_buf_index);
-                       kfree(msd_send);
+                       netvsc_xmit_completion(msd_send);
                }
        }
 
@@ -1011,7 +1013,6 @@ static void netvsc_receive(struct netvsc_device *net_device,
        }
 
        count = vmxferpage_packet->range_cnt;
-       netvsc_packet->device = device;
        netvsc_packet->channel = channel;
 
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
index f9db6bc513e954b0831d881baae5048ad7cc9f87..448716787e73c237f5de26f072ce03a28ab4986e 100644 (file)
@@ -229,7 +229,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
        return q_idx;
 }
 
-static void netvsc_xmit_completion(void *context)
+void netvsc_xmit_completion(void *context)
 {
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
@@ -370,50 +370,60 @@ not_ip:
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct hv_netvsc_packet *packet;
+       struct hv_netvsc_packet *packet = NULL;
        int ret;
        unsigned int num_data_pgs;
        struct rndis_message *rndis_msg;
        struct rndis_packet *rndis_pkt;
        u32 rndis_msg_size;
        bool isvlan;
+       bool linear = false;
        struct rndis_per_packet_info *ppi;
        struct ndis_tcp_ip_checksum_info *csum_info;
        struct ndis_tcp_lso_info *lso_info;
        int  hdr_offset;
        u32 net_trans_info;
        u32 hash;
-       u32 skb_length = skb->len;
-       u32 head_room = skb_headroom(skb);
+       u32 skb_length;
+       u32 head_room;
        u32 pkt_sz;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 
 
        /* We will need at most two pages to describe the rndis
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
-        * of pages in a single packet.
+        * of pages in a single packet. If skb is scattered around
+        * more pages we try linearizing it.
         */
+
+check_size:
+       skb_length = skb->len;
+       head_room = skb_headroom(skb);
        num_data_pgs = netvsc_get_slots(skb) + 2;
-       if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
-               netdev_err(net, "Packet too big: %u\n", skb->len);
-               dev_kfree_skb(skb);
-               net->stats.tx_dropped++;
-               return NETDEV_TX_OK;
+       if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
+               net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
+                                     num_data_pgs, skb->len);
+               ret = -EFAULT;
+               goto drop;
+       } else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+               if (skb_linearize(skb)) {
+                       net_alert_ratelimited("failed to linearize skb\n");
+                       ret = -ENOMEM;
+                       goto drop;
+               }
+               linear = true;
+               goto check_size;
        }
 
-       pkt_sz = sizeof(struct hv_netvsc_packet) +
-                       sizeof(struct rndis_message) +
-                       NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
-                       NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE;
+       pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
 
        if (head_room < pkt_sz) {
                packet = kmalloc(pkt_sz, GFP_ATOMIC);
                if (!packet) {
                        /* out of memory, drop packet */
                        netdev_err(net, "unable to alloc hv_netvsc_packet\n");
-                       dev_kfree_skb(skb);
-                       net->stats.tx_dropped++;
-                       return NETDEV_TX_OK;
+                       ret = -ENOMEM;
+                       goto drop;
                }
                packet->part_of_skb = false;
        } else {
@@ -436,11 +446,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
                                sizeof(struct hv_netvsc_packet));
 
-       memset(packet->rndis_msg, 0, sizeof(struct rndis_message) +
-                                       NDIS_VLAN_PPI_SIZE +
-                                       NDIS_CSUM_PPI_SIZE +
-                                       NDIS_LSO_PPI_SIZE +
-                                       NDIS_HASH_PPI_SIZE);
+       memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);
 
        /* Set the completion routine */
        packet->send_completion = netvsc_xmit_completion;
@@ -581,7 +587,7 @@ drop:
                net->stats.tx_bytes += skb_length;
                net->stats.tx_packets++;
        } else {
-               if (!packet->part_of_skb)
+               if (packet && !packet->part_of_skb)
                        kfree(packet);
                if (ret != -EAGAIN) {
                        dev_kfree_skb_any(skb);
@@ -872,9 +878,7 @@ static int netvsc_probe(struct hv_device *dev,
                return -ENOMEM;
 
        max_needed_headroom = sizeof(struct hv_netvsc_packet) +
-                               sizeof(struct rndis_message) +
-                               NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
-                               NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE;
+                             RNDIS_AND_PPI_SIZE;
 
        netif_carrier_off(net);
 
index a1604376aee1a1d9fb420e77e4d6791fc4719fa1..0d92efefd796c9b631beeca1b35ad9979cb36d2f 100644 (file)
@@ -47,8 +47,6 @@ struct rndis_request {
 
        /* Simplify allocation by having a netvsc packet inline */
        struct hv_netvsc_packet pkt;
-       /* Set 2 pages for rndis requests crossing page boundary */
-       struct hv_page_buffer buf[2];
 
        struct rndis_message request_msg;
        /*
index 5ad46f7f514f1332df5f42f6f7546aeef313da30..38026650c0387ecb101d085e24273bf24ded2783 100644 (file)
@@ -1173,7 +1173,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
 }
 
 static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, int db)
+at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
 {
        struct at86rf230_local *lp = hw->priv;
 
index 924ea98bd5311b1c7197a9fe4e9b475b095069ce..54549a6223dd2f47f493ae112a06f12f3337120f 100644 (file)
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
 void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+                                  const void *iaddr, bool is_v6);
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                        const void *iaddr, bool is_v6);
 void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
index 2a175006028b347efe1fbe6d145f311f78c80bd7..c30b5c300c05f6fdd91e48802c84b77103e3a322 100644 (file)
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
        hash = (addr->atype == IPVL_IPV6) ?
               ipvlan_get_v6_hash(&addr->ip6addr) :
               ipvlan_get_v4_hash(&addr->ip4addr);
-       hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+       if (hlist_unhashed(&addr->hlnode))
+               hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
 }
 
 void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
 {
-       hlist_del_rcu(&addr->hlnode);
+       hlist_del_init_rcu(&addr->hlnode);
        if (sync)
                synchronize_rcu();
 }
 
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+                                  const void *iaddr, bool is_v6)
 {
-       struct ipvl_port *port = ipvlan->port;
        struct ipvl_addr *addr;
 
        list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
                    ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
                    (!is_v6 && addr->atype == IPVL_IPV4 &&
                    addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
-                       return true;
+                       return addr;
        }
+       return NULL;
+}
+
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
+{
+       struct ipvl_dev *ipvlan;
 
-       if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
-               return true;
+       ASSERT_RTNL();
 
+       list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+               if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
+                       return true;
+       }
        return false;
 }
 
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
        if (skb->protocol == htons(ETH_P_PAUSE))
                return;
 
-       list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                if (local && (ipvlan == in_dev))
                        continue;
 
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
 mcast_acct:
                ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
        }
+       rcu_read_unlock();
 
        /* Locally generated? ...Forward a copy to the main-device as
         * well. On the RX side we'll ignore it (wont give it to any
@@ -330,7 +342,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
        struct rtable *rt;
        int err, ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
-               .flowi4_oif = dev->iflink,
+               .flowi4_oif = dev_get_iflink(dev),
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
                .daddr = ip4h->daddr,
index 2950c3780230596270aa64d0ebac69f7c1bd3cbd..77b92a0fe557ade8fea66af377217e9c0f8feded 100644 (file)
@@ -114,7 +114,6 @@ static int ipvlan_init(struct net_device *dev)
        dev->features = phy_dev->features & IPVLAN_FEATURES;
        dev->features |= NETIF_F_LLTX;
        dev->gso_max_size = phy_dev->gso_max_size;
-       dev->iflink = phy_dev->ifindex;
        dev->hard_header_len = phy_dev->hard_header_len;
 
        ipvlan_set_lockdep_class(dev);
@@ -305,6 +304,13 @@ static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
        return 0;
 }
 
+static int ipvlan_get_iflink(const struct net_device *dev)
+{
+       struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+       return ipvlan->phy_dev->ifindex;
+}
+
 static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_init               = ipvlan_init,
        .ndo_uninit             = ipvlan_uninit,
@@ -317,6 +323,7 @@ static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_get_stats64        = ipvlan_get_stats64,
        .ndo_vlan_rx_add_vid    = ipvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ipvlan_vlan_rx_kill_vid,
+       .ndo_get_iflink         = ipvlan_get_iflink,
 };
 
 static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -504,7 +511,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
        if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
                list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
                        ipvlan_ht_addr_del(addr, !dev->dismantle);
-                       list_del_rcu(&addr->anode);
+                       list_del(&addr->anode);
                }
        }
        list_del_rcu(&ipvlan->pnode);
@@ -606,7 +613,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
        struct ipvl_addr *addr;
 
-       if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+       if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv6=%pI6c addr for %s intf\n",
                          ip6_addr, ipvlan->dev->name);
@@ -619,9 +626,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        addr->master = ipvlan;
        memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
        addr->atype = IPVL_IPV6;
-       list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+       list_add_tail(&addr->anode, &ipvlan->addrs);
        ipvlan->ipv6cnt++;
-       ipvlan_ht_addr_add(ipvlan, addr);
+       /* If the interface is not up, the address will be added to the hash
+        * list by ipvlan_open.
+        */
+       if (netif_running(ipvlan->dev))
+               ipvlan_ht_addr_add(ipvlan, addr);
 
        return 0;
 }
@@ -630,12 +641,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
        struct ipvl_addr *addr;
 
-       addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+       addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
        if (!addr)
                return;
 
        ipvlan_ht_addr_del(addr, true);
-       list_del_rcu(&addr->anode);
+       list_del(&addr->anode);
        ipvlan->ipv6cnt--;
        WARN_ON(ipvlan->ipv6cnt < 0);
        kfree_rcu(addr, rcu);
@@ -674,7 +685,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
        struct ipvl_addr *addr;
 
-       if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+       if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv4=%pI4 on %s intf.\n",
                          ip4_addr, ipvlan->dev->name);
@@ -687,9 +698,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        addr->master = ipvlan;
        memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
        addr->atype = IPVL_IPV4;
-       list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+       list_add_tail(&addr->anode, &ipvlan->addrs);
        ipvlan->ipv4cnt++;
-       ipvlan_ht_addr_add(ipvlan, addr);
+       /* If the interface is not up, the address will be added to the hash
+        * list by ipvlan_open.
+        */
+       if (netif_running(ipvlan->dev))
+               ipvlan_ht_addr_add(ipvlan, addr);
        ipvlan_set_broadcast_mac_filter(ipvlan, true);
 
        return 0;
@@ -699,12 +714,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
        struct ipvl_addr *addr;
 
-       addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+       addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
        if (!addr)
                return;
 
        ipvlan_ht_addr_del(addr, true);
-       list_del_rcu(&addr->anode);
+       list_del(&addr->anode);
        ipvlan->ipv4cnt--;
        WARN_ON(ipvlan->ipv4cnt < 0);
        if (!ipvlan->ipv4cnt)
index b5e3320ca50620eafbaf91c985451cf5eadad6fc..b227a13f6473404a5082a0a99d4e7067b3daeaf7 100644 (file)
@@ -786,7 +786,6 @@ static int macvlan_init(struct net_device *dev)
        dev->hw_features        |= NETIF_F_LRO;
        dev->vlan_features      = lowerdev->vlan_features & MACVLAN_FEATURES;
        dev->gso_max_size       = lowerdev->gso_max_size;
-       dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
        macvlan_set_lockdep_class(dev);
@@ -995,6 +994,13 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
+static int macvlan_dev_get_iflink(const struct net_device *dev)
+{
+       struct macvlan_dev *vlan = netdev_priv(dev);
+
+       return vlan->lowerdev->ifindex;
+}
+
 static const struct ethtool_ops macvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_settings           = macvlan_ethtool_get_settings,
@@ -1025,6 +1031,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_netpoll_setup      = macvlan_dev_netpoll_setup,
        .ndo_netpoll_cleanup    = macvlan_dev_netpoll_cleanup,
 #endif
+       .ndo_get_iflink         = macvlan_dev_get_iflink,
 };
 
 void macvlan_common_setup(struct net_device *dev)
index f80e19ac67041a0e75778761373ec3ce2a982d95..fabf11d32d276d9ac6ef6ac50ccd24609a48f90f 100644 (file)
@@ -192,16 +192,17 @@ static int at803x_probe(struct phy_device *phydev)
 {
        struct device *dev = &phydev->dev;
        struct at803x_priv *priv;
+       struct gpio_desc *gpiod_reset;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       priv->gpiod_reset = devm_gpiod_get(dev, "reset");
-       if (IS_ERR(priv->gpiod_reset))
-               priv->gpiod_reset = NULL;
-       else
-               gpiod_direction_output(priv->gpiod_reset, 1);
+       gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(gpiod_reset))
+               return PTR_ERR(gpiod_reset);
+
+       priv->gpiod_reset = gpiod_reset;
 
        phydev->priv = priv;
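
The at803x change above leans on the semantics of devm_gpiod_get_optional(): a missing "reset" line yields NULL rather than an error, while a genuine lookup failure (including probe deferral) is now propagated instead of being silently ignored. A small illustrative consumer of the stored descriptor (the delay value and function name are made up for the example):

/* Toggle the optional reset line if it was described in firmware.
 * The NULL check is defensive; gpiod consumer calls are documented to
 * tolerate NULL descriptors coming from _optional lookups.
 */
static void example_at803x_hw_reset(struct at803x_priv *priv)
{
	if (!priv->gpiod_reset)
		return;

	gpiod_set_value(priv->gpiod_reset, 0);
	usleep_range(1000, 2000);		/* illustrative settle time */
	gpiod_set_value(priv->gpiod_reset, 1);
}
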
 
index a08a3c78ba97b08fb503bace451a3d177fc04053..1960b46add65b3b89f122cc401c872050cebdbe4 100644 (file)
@@ -183,6 +183,35 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
+int fixed_phy_update_state(struct phy_device *phydev,
+                          const struct fixed_phy_status *status,
+                          const struct fixed_phy_status *changed)
+{
+       struct fixed_mdio_bus *fmb = &platform_fmb;
+       struct fixed_phy *fp;
+
+       if (!phydev || !phydev->bus)
+               return -EINVAL;
+
+       list_for_each_entry(fp, &fmb->phys, node) {
+               if (fp->addr == phydev->addr) {
+#define _UPD(x) if (changed->x) \
+       fp->status.x = status->x
+                       _UPD(link);
+                       _UPD(speed);
+                       _UPD(duplex);
+                       _UPD(pause);
+                       _UPD(asym_pause);
+#undef _UPD
+                       fixed_phy_update_regs(fp);
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL(fixed_phy_update_state);
+
 int fixed_phy_add(unsigned int irq, int phy_addr,
                  struct fixed_phy_status *status)
 {
index 724a9b50df7a44770ed079081a61caeaa91f5db1..75d6f26729a30e34cdaaf8334a9cc0aaa2a01c82 100644 (file)
@@ -189,7 +189,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                skb_put(skb, sizeof(padbytes));
        }
 
-       usbnet_set_skb_tx_stats(skb, 1);
+       usbnet_set_skb_tx_stats(skb, 1, 0);
        return skb;
 }
 
index 9311a08565bed17cf5082a21b9c6dca7ab02dfe1..4545e78840b0d9dc80ae4392b36cfcf588f17c3a 100644 (file)
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
 #define DELL_VENDOR_ID         0x413C
 #define REALTEK_VENDOR_ID      0x0bda
 #define SAMSUNG_VENDOR_ID      0x04e8
+#define LENOVO_VENDOR_ID       0x17ef
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 70cbea551139efaf5806a03b93aa207be07a20d1..c3e4da9e79ca071a06082e965a3aec5bb206a77e 100644 (file)
@@ -1177,13 +1177,12 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
        ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
        ctx->tx_ntbs++;
 
-       /* usbnet has already counted all the framing overhead.
+       /* usbnet will count all the framing overhead by default.
         * Adjust the stats so that the tx_bytes counter show real
         * payload data instead.
         */
-       dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
-
-       usbnet_set_skb_tx_stats(skb_out, n);
+       usbnet_set_skb_tx_stats(skb_out, n,
+                               ctx->tx_curr_frame_payload - skb_out->len);
 
        return skb_out;
 
index 5065538dd03bd35497365b2d504fa640cfd684ec..ac4d03b328b130ab918175b1fa5c8fe55a0cbc7b 100644 (file)
@@ -493,6 +493,7 @@ enum rtl8152_flags {
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK              0x0bda
 #define VENDOR_ID_SAMSUNG              0x04e8
+#define VENDOR_ID_LENOVO               0x17ef
 
 #define MCU_TYPE_PLA                   0x0100
 #define MCU_TYPE_USB                   0x0000
@@ -4114,6 +4115,7 @@ static struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {}
 };
 
index 7650cdc8fe6b0c708b13ecff2650609800c96f3a..953de13267df19d6fcefe60c2008f6222ca5775c 100644 (file)
@@ -144,7 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                skb_put(skb, sizeof(padbytes));
        }
 
-       usbnet_set_skb_tx_stats(skb, 1);
+       usbnet_set_skb_tx_stats(skb, 1, 0);
        return skb;
 }
 
index 0f3ff285f6a163f97a5254741e85851fba2f3a87..777757ae19732ab10ab2283645464a87a02c7b20 100644 (file)
@@ -1346,9 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                } else
                        urb->transfer_flags |= URB_ZERO_PACKET;
        }
-       entry->length = urb->transfer_buffer_length = length;
-       if (!(info->flags & FLAG_MULTI_PACKET))
-               usbnet_set_skb_tx_stats(skb, 1);
+       urb->transfer_buffer_length = length;
+
+       if (info->flags & FLAG_MULTI_PACKET) {
+               /* Driver has set number of packets and a length delta.
+                * Calculate the complete length and ensure that it's
+                * positive.
+                */
+               entry->length += length;
+               if (WARN_ON_ONCE(entry->length <= 0))
+                       entry->length = length;
+       } else {
+               usbnet_set_skb_tx_stats(skb, 1, length);
+       }
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        retval = usb_autopm_get_interface_async(dev->intf);
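
The asix, cdc_ncm, sr9800 and usbnet hunks above all switch usbnet_set_skb_tx_stats() to a (skb, packets, bytes_delta) form: multi-packet minidrivers record their framing overhead as a negative delta when they build the frame, and usbnet_start_xmit() adds the final URB length on top. The helper itself is not shown in this section; its assumed shape, given the skb_data entry usbnet keeps in skb->cb, would be roughly:

/* Assumed helper shape: remember how many packets this skb carries and
 * seed the byte count with a (possibly negative) delta that the xmit
 * path later completes with the URB length.
 */
static inline void usbnet_set_skb_tx_stats(struct sk_buff *skb,
					   unsigned long packets,
					   long bytes_delta)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;

	entry->packets = packets;
	entry->length = bytes_delta;
}
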
index 4cca36ebc4fb194a22af440f11047ecc0ce7fd1e..c8186ffda1a314f08cb71b6688c5b120e57c1717 100644 (file)
@@ -263,6 +263,20 @@ static void veth_poll_controller(struct net_device *dev)
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
+static int veth_get_iflink(const struct net_device *dev)
+{
+       struct veth_priv *priv = netdev_priv(dev);
+       struct net_device *peer;
+       int iflink;
+
+       rcu_read_lock();
+       peer = rcu_dereference(priv->peer);
+       iflink = peer ? peer->ifindex : 0;
+       rcu_read_unlock();
+
+       return iflink;
+}
+
 static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
@@ -275,6 +289,7 @@ static const struct net_device_ops veth_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = veth_poll_controller,
 #endif
+       .ndo_get_iflink         = veth_get_iflink,
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
index a829930dac150e2910629f4d918b3dca3b493e8a..63c7810e1545a357eda7578af862ed18322de933 100644 (file)
@@ -939,11 +939,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_orphan(skb);
        nf_reset(skb);
 
-       /* It is better to stop queue if running out of space
-        * instead of forcing queuing layer to requeue the skb
-        * by returning TX_BUSY (and cause a BUG message).
-        * Since most packets only take 1 or 2 ring slots
-        * this means 16 slots are typically wasted.
+       /* If running out of space, stop the queue to avoid getting packets
+        * that we are then unable to transmit.
+        * An alternative would be to force the queuing layer to requeue the
+        * skb by returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not
+        * be returned in a normal path of operation: it means that the driver
+        * is not maintaining the TX queue stop/start state properly, and
+        * causes the stack to do a non-trivial amount of useless work.
+        * Since most packets only take 1 or 2 ring slots, stopping the queue
+        * early means 16 slots are typically wasted.
         */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
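
The rewritten comment argues for stopping the queue early rather than ever returning NETDEV_TX_BUSY. The generic shape of that stop/wake discipline, with all example_* names being illustrative rather than virtio_net code:

/* Stop the queue from the xmit path while one worst-case packet still
 * fits; wake it from the completion path once enough slots have been
 * reclaimed.
 */
struct example_txq {
	struct net_device *dev;
	unsigned int index;
	unsigned int free_slots;	/* descriptors still available */
};

#define EXAMPLE_STOP_THRESHOLD	(2 + MAX_SKB_FRAGS)

static netdev_tx_t example_xmit(struct example_txq *txq, struct sk_buff *skb)
{
	/* ...post skb to the ring; it always fits because of the check
	 * made below on the previous transmission...
	 */
	if (txq->free_slots < EXAMPLE_STOP_THRESHOLD)
		netif_stop_subqueue(txq->dev, txq->index);

	return NETDEV_TX_OK;
}

static void example_tx_complete(struct example_txq *txq)
{
	/* ...reclaim finished descriptors, updating free_slots... */
	if (txq->free_slots >= EXAMPLE_STOP_THRESHOLD &&
	    __netif_subqueue_stopped(txq->dev, txq->index))
		netif_wake_subqueue(txq->dev, txq->index);
}
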
index 4c8a944d58b41f0e42c1d9e34acb83a275c34b1d..c1d0e7a9da04c36c58c43b6c8c432a1d8a8720f1 100644 (file)
@@ -104,7 +104,7 @@ vmxnet3_rq_driver_stats[] = {
                                          rx_buf_alloc_failure) },
 };
 
-/* gloabl stats maintained by the driver */
+/* global stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
        /* description,         offset */
@@ -272,7 +272,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
                        adapter->shared->devRead.misc.uptFeatures &=
                        ~UPT1_F_RXCSUM;
 
-               /* update harware LRO capability accordingly */
+               /* update hardware LRO capability accordingly */
                if (features & NETIF_F_LRO)
                        adapter->shared->devRead.misc.uptFeatures |=
                                                        UPT1_F_LRO;
index a8d345054d233da0949a7abd740a2bdca5a5214f..577c9b071ad9e8568d955a39ce00eb185e52e186 100644 (file)
@@ -989,7 +989,7 @@ out:
 
 /* Watch incoming packets to learn mapping between Ethernet address
  * and Tunnel endpoint.
- * Return true if packet is bogus and should be droppped.
+ * Return true if packet is bogus and should be dropped.
  */
 static bool vxlan_snoop(struct net_device *dev,
                        union vxlan_addr *src_ip, const u8 *src_mac)
@@ -1085,7 +1085,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
 /* Update multicast group membership when first VNI on
- * multicast asddress is brought up
+ * multicast address is brought up
  */
 static int vxlan_igmp_join(struct vxlan_dev *vxlan)
 {
@@ -1229,7 +1229,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                 * this as a malformed packet. This behavior diverges from
                 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
                 * in reserved fields are to be ignored. The approach here
-                * maintains compatbility with previous stack code, and also
+                * maintains compatibility with previous stack code, and also
                 * is more robust and provides a little more security in
                 * adding extensions to VXLAN.
                 */
@@ -1672,7 +1672,8 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+                          struct sk_buff *skb,
                           struct net_device *dev, struct in6_addr *saddr,
                           struct in6_addr *daddr, __u8 prio, __u8 ttl,
                           __be16 src_port, __be16 dst_port,
@@ -1748,7 +1749,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
-       udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
+       udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
                             ttl, src_port, dst_port,
                             !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
        return 0;
@@ -1758,7 +1759,7 @@ err:
 }
 #endif
 
-int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port,
                   struct vxlan_metadata *md, bool xnet, u32 vxflags)
@@ -1827,7 +1828,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
-       return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
+       return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
                                   ttl, df, src_port, dst_port, xnet,
                                   !(vxflags & VXLAN_F_UDP_CSUM));
 }
@@ -1882,6 +1883,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                           struct vxlan_rdst *rdst, bool did_rsc)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct sock *sk = vxlan->vn_sock->sock->sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        struct flowi4 fl4;
@@ -1961,7 +1963,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                md.vni = htonl(vni << 8);
                md.gbp = skb->mark;
 
-               err = vxlan_xmit_skb(rt, skb, fl4.saddr,
+               err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
                                     dst->sin.sin_addr.s_addr, tos, ttl, df,
                                     src_port, dst_port, &md,
                                     !net_eq(vxlan->net, dev_net(vxlan->dev)),
@@ -1975,7 +1977,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
-               struct sock *sk = vxlan->vn_sock->sock->sk;
                struct dst_entry *ndst;
                struct flowi6 fl6;
                u32 flags;
@@ -2021,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                md.vni = htonl(vni << 8);
                md.gbp = skb->mark;
 
-               err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
+               err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
                                      0, ttl, src_port, dst_port, &md,
                                      !net_eq(vxlan->net, dev_net(vxlan->dev)),
                                      vxlan->flags);
@@ -2255,11 +2256,8 @@ static int vxlan_stop(struct net_device *dev)
        int ret = 0;
 
        if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
-           !vxlan_group_used(vn, vxlan)) {
+           !vxlan_group_used(vn, vxlan))
                ret = vxlan_igmp_leave(vxlan);
-               if (ret)
-                       return ret;
-       }
 
        del_timer_sync(&vxlan->age_timer);
 
index 88d121d43c08bedf2efc3265964188cf2b7f94a7..bcfa01add7cc479ca25d5f2f198f9623fc966415 100644 (file)
@@ -579,6 +579,7 @@ static int cosa_probe(int base, int irq, int dma)
                /* Register the network interface */
                if (!(chan->netdev = alloc_hdlcdev(chan))) {
                        pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
+                       err = -ENOMEM;
                        goto err_hdlcdev;
                }
                dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
index bea0f313a7a80cbd7528795ff04e67162f77f347..317bc79cc8b9b22eace33116de7504c216b60a39 100644 (file)
@@ -850,6 +850,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev = alloc_hdlcdev(sc);
        if (!dev) {
                printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
+               err = -ENOMEM;
                goto err_hdlcdev;
        }
 
index f92050617ae682e02bb48b6676a16298ae2dfa4f..5147ebe4cd05d13d12db78aacbd3081fe83a9727 100644 (file)
@@ -779,8 +779,6 @@ static void ar5523_tx(struct ieee80211_hw *hw,
                ieee80211_stop_queues(hw);
        }
 
-       data->skb = skb;
-
        spin_lock_irqsave(&ar->tx_data_list_lock, flags);
        list_add_tail(&data->list, &ar->tx_queue_pending);
        spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
@@ -817,10 +815,13 @@ static void ar5523_tx_work_locked(struct ar5523 *ar)
                if (!data)
                        break;
 
-               skb = data->skb;
+               txi = container_of((void *)data, struct ieee80211_tx_info,
+                                  driver_data);
                txqid = 0;
-               txi = IEEE80211_SKB_CB(skb);
+
+               skb = container_of((void *)txi, struct sk_buff, cb);
                paylen = skb->len;
+
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb) {
                        ar5523_err(ar, "Failed to allocate TX urb\n");
index 00c6fd346d48225e92a865ff8938a1c9f921e6aa..9a322a65cdb548180a75940cb3997989226768e4 100644 (file)
@@ -74,7 +74,6 @@ struct ar5523_tx_cmd {
 struct ar5523_tx_data {
        struct list_head        list;
        struct ar5523           *ar;
-       struct sk_buff          *skb;
        struct urb              *urb;
 };
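
With the cached skb pointer gone from struct ar5523_tx_data, the driver relies on the fact that the tx_data lives inside the ieee80211_tx_info scratch space, which in turn is the skb's control buffer. A sketch of both directions of that mapping, matching the container_of() chain used in the ar5523.c hunk above (the wrapper names are illustrative):

/* Enqueue side: stash the per-frame state in the tx_info instead of
 * keeping a back pointer to the skb.
 */
static struct ar5523_tx_data *example_tx_data_of(struct sk_buff *skb)
{
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);

	return (struct ar5523_tx_data *)txi->driver_data;
}

/* Dequeue side: walk back from the embedded driver_data to the
 * tx_info, and from the tx_info (which is skb->cb) to the skb.
 */
static struct sk_buff *example_skb_of(struct ar5523_tx_data *data)
{
	struct ieee80211_tx_info *txi =
		container_of((void *)data, struct ieee80211_tx_info,
			     driver_data);

	return container_of((void *)txi, struct sk_buff, cb);
}
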
 
index 1eebe2ea3dfb0b3d67ff387d2568c5aa434cdc7c..7e9481099a8e6ba9dd06e13a6f60559993956516 100644 (file)
@@ -131,6 +131,9 @@ struct ath_ops {
        void (*enable_write_buffer)(void *);
        void (*write_flush) (void *);
        u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+       void (*enable_rmw_buffer)(void *);
+       void (*rmw_flush) (void *);
+
 };
 
 struct ath_common;
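
The two new ath_ops hooks mirror the existing write-buffer pair and are driven through wrapper macros in the hunks further down (the ar9271 PA calibration, for instance). The usage pattern they enable, using register names that appear in that hunk:

/* Batch a run of read-modify-write accesses so a bus back end that
 * implements the hooks (e.g. a USB target) can push them out in one
 * transfer; back ends without the hooks simply perform the RMWs one by
 * one.  Pattern only -- mirrors the calibration hunk below.
 */
static void example_rmw_batch(struct ath_hw *ah)
{
	ENABLE_REG_RMW_BUFFER(ah);

	REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
	REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);

	REG_RMW_BUFFER_FLUSH(ah);
}
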
index 1ed7a88aeea9cd7ba7890fd308a1952888cfd28c..7ca0d6f930fd2e6e11021ac8a5d09ef002e80fc5 100644 (file)
@@ -1283,6 +1283,7 @@ struct ath5k_hw {
 #define ATH_STAT_PROMISC       1
 #define ATH_STAT_LEDSOFT       2               /* enable LED gpio status */
 #define ATH_STAT_STARTED       3               /* opened & irqs enabled */
+#define ATH_STAT_RESET         4               /* hw reset */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
        unsigned int            fif_filter_flags; /* Current FIF_* filter flags */
index 57a80e89822d7b36ffce951c98e9f12d158942cb..a6131825c9f6eb82c2f2a75bb4c00b4279b6dea4 100644 (file)
@@ -1523,6 +1523,9 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
        enum ath5k_int imask;
        unsigned long flags;
 
+       if (test_bit(ATH_STAT_RESET, ah->status))
+               return;
+
        spin_lock_irqsave(&ah->irqlock, flags);
        imask = ah->imask;
        if (ah->rx_pending)
@@ -2858,10 +2861,12 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
 {
        struct ath_common *common = ath5k_hw_common(ah);
        int ret, ani_mode;
-       bool fast;
+       bool fast = chan && modparam_fastchanswitch ? 1 : 0;
 
        ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
 
+       __set_bit(ATH_STAT_RESET, ah->status);
+
        ath5k_hw_set_imr(ah, 0);
        synchronize_irq(ah->irq);
        ath5k_stop_tasklets(ah);
@@ -2876,11 +2881,29 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
         * so we should also free any remaining
         * tx buffers */
        ath5k_drain_tx_buffs(ah);
+
+       /* Stop PCU */
+       ath5k_hw_stop_rx_pcu(ah);
+
+       /* Stop DMA
+        *
+        * Note: If DMA didn't stop, continue anyway;
+        * only a reset will fix it.
+        */
+       ret = ath5k_hw_dma_stop(ah);
+
+       /* RF Bus grant won't work if we have pending
+        * frames
+        */
+       if (ret && fast) {
+               ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+                         "DMA didn't stop, falling back to normal reset\n");
+               fast = false;
+       }
+
        if (chan)
                ah->curchan = chan;
 
-       fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
-
        ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
        if (ret) {
                ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
@@ -2934,6 +2957,8 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
         */
 /*     ath5k_chan_change(ah, c); */
 
+       __clear_bit(ATH_STAT_RESET, ah->status);
+
        ath5k_beacon_config(ah);
        /* intrs are enabled by ath5k_beacon_config */
 
index b9b651ea985156d0cd5dcbb814d75b927bf7d238..99e62f99a182db5c205a7dca60492b4e312f6f84 100644 (file)
@@ -1169,30 +1169,6 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
        if (ah->ah_version == AR5K_AR5212)
                ath5k_hw_set_sleep_clock(ah, false);
 
-       /*
-        * Stop PCU
-        */
-       ath5k_hw_stop_rx_pcu(ah);
-
-       /*
-        * Stop DMA
-        *
-        * Note: If DMA didn't stop continue
-        * since only a reset will fix it.
-        */
-       ret = ath5k_hw_dma_stop(ah);
-
-       /* RF Bus grant won't work if we have pending
-        * frames */
-       if (ret && fast) {
-               ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
-                       "DMA didn't stop, falling back to normal reset\n");
-               fast = false;
-               /* Non fatal, just continue with
-                * normal reset */
-               ret = 0;
-       }
-
        mode = channel->hw_value;
        switch (mode) {
        case AR5K_MODE_11A:
index 473972288a84a901a69cc555802b2d8228e274f7..ecda613c2d547d4278ac0665c8c0eeaaf594c66a 100644 (file)
@@ -46,7 +46,8 @@ ath9k_hw-y:=  \
 ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
 
 ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
-                                          ar9003_mci.o
+                                          ar9003_mci.o \
+                                          ar9003_aic.o
 
 ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
 
index ca01d17d130fec62067e2e2f9da78f836e2fa643..25e45e4d1a605ba69092f9c76af4dcf3ebfae056 100644 (file)
@@ -107,11 +107,21 @@ static const struct ani_cck_level_entry cck_level_table[] = {
 static void ath9k_hw_update_mibstats(struct ath_hw *ah,
                                     struct ath9k_mib_stats *stats)
 {
-       stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
-       stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
-       stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
-       stats->rts_good += REG_READ(ah, AR_RTS_OK);
-       stats->beacons += REG_READ(ah, AR_BEACON_CNT);
+       u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL,
+                      AR_FCS_FAIL, AR_BEACON_CNT};
+       u32 data[5];
+
+       REG_READ_MULTI(ah, &addr[0], &data[0], 5);
+       /* AR_RTS_OK */
+       stats->rts_good += data[0];
+       /* AR_RTS_FAIL */
+       stats->rts_bad += data[1];
+       /* AR_ACK_FAIL */
+       stats->ackrcv_bad += data[2];
+       /* AR_FCS_FAIL */
+       stats->fcs_bad += data[3];
+       /* AR_BEACON_CNT */
+       stats->beacons += data[4];
 }
 
 static void ath9k_ani_restart(struct ath_hw *ah)
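
The MIB-statistics hunk above folds five individual REG_READ() calls into one REG_READ_MULTI() over an address array, so bus back ends that support batched reads (notably USB-attached devices) can fetch all counters in a single transaction. The calling convention it implies, reusing two of the registers from that hunk:

/* Gather the addresses, issue one batched read, unpack in order. */
static void example_read_rts_counters(struct ath_hw *ah,
				      u32 *rts_ok, u32 *rts_fail)
{
	u32 addr[2] = { AR_RTS_OK, AR_RTS_FAIL };
	u32 data[2];

	REG_READ_MULTI(ah, &addr[0], &data[0], 2);

	*rts_ok   = data[0];
	*rts_fail = data[1];
}
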
index f273427fdd29ff93a3406d582928499ae22398c7..6c23d279525f5dfa8671342b3c30b25163b6e0ba 100644 (file)
@@ -681,12 +681,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
                        phymode |= AR_PHY_FC_DYN2040_PRI_CH;
 
        }
+       ENABLE_REGWRITE_BUFFER(ah);
        REG_WRITE(ah, AR_PHY_TURBO, phymode);
 
+       /* This function only does REG_WRITEs, so
+        * we can include it in the REGWRITE_BUFFER. */
        ath9k_hw_set11nmac2040(ah, chan);
 
-       ENABLE_REGWRITE_BUFFER(ah);
-
        REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
        REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
 
index 42190b67c6719594bcbe57b9d70e02186645bace..50fcd343c41af12d865d55f1ceb5427797bd20d7 100644 (file)
@@ -430,46 +430,43 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
        u32 regVal;
        unsigned int i;
        u32 regList[][2] = {
-               { 0x786c, 0 },
-               { 0x7854, 0 },
-               { 0x7820, 0 },
-               { 0x7824, 0 },
-               { 0x7868, 0 },
-               { 0x783c, 0 },
-               { 0x7838, 0 } ,
-               { 0x7828, 0 } ,
+               { AR9285_AN_TOP3, 0 },
+               { AR9285_AN_RXTXBB1, 0 },
+               { AR9285_AN_RF2G1, 0 },
+               { AR9285_AN_RF2G2, 0 },
+               { AR9285_AN_TOP2, 0 },
+               { AR9285_AN_RF2G8, 0 },
+               { AR9285_AN_RF2G7, 0 },
+               { AR9285_AN_RF2G3, 0 },
        };
 
-       for (i = 0; i < ARRAY_SIZE(regList); i++)
-               regList[i][1] = REG_READ(ah, regList[i][0]);
-
-       regVal = REG_READ(ah, 0x7834);
-       regVal &= (~(0x1));
-       REG_WRITE(ah, 0x7834, regVal);
-       regVal = REG_READ(ah, 0x9808);
-       regVal |= (0x1 << 27);
-       REG_WRITE(ah, 0x9808, regVal);
+       REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList));
 
+       ENABLE_REG_RMW_BUFFER(ah);
+       /* 7834, b1=0 */
+       REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+       /* 9808, b27=1 */
+       REG_SET_BIT(ah, 0x9808, 1 << 27);
        /* 786c,b23,1, pwddac=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+       REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
        /* 7854, b5,1, pdrxtxbb=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+       REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1);
        /* 7854, b7,1, pdv2i=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+       REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I);
        /* 7854, b8,1, pddacinterface=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+       REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF);
        /* 7824,b12,0, offcal=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
        /* 7838, b1,0, pwddb=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB);
        /* 7820,b11,0, enpacal=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL);
        /* 7820,b25,1, pdpadrv1=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1);
        /* 7820,b24,0, pdpadrv2=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2);
        /* 7820,b23,0, pdpaout=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT);
        /* 783c,b14-16,7, padrvgn2tab_0=7 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
        /*
@@ -477,8 +474,9 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
         * does not matter since we turn it off
         */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
+       /* 7828, b0-11, ccom=fff */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        /* Set:
         * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
@@ -490,15 +488,16 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
 
        /* find off_6_1; */
        for (i = 6; i > 0; i--) {
-               regVal = REG_READ(ah, 0x7834);
+               regVal = REG_READ(ah, AR9285_AN_RF2G6);
                regVal |= (1 << (20 + i));
-               REG_WRITE(ah, 0x7834, regVal);
+               REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
                udelay(1);
                /* regVal = REG_READ(ah, 0x7834); */
                regVal &= (~(0x1 << (20 + i)));
-               regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+               regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9),
+                             AR9285_AN_RXTXBB1_SPARE9)
                            << (20 + i));
-               REG_WRITE(ah, 0x7834, regVal);
+               REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
        }
 
        regVal = (regVal >> 20) & 0x7f;
@@ -515,15 +514,15 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
                ah->pacal_info.prev_offset = regVal;
        }
 
-       ENABLE_REGWRITE_BUFFER(ah);
 
-       regVal = REG_READ(ah, 0x7834);
-       regVal |= 0x1;
-       REG_WRITE(ah, 0x7834, regVal);
-       regVal = REG_READ(ah, 0x9808);
-       regVal &= (~(0x1 << 27));
-       REG_WRITE(ah, 0x9808, regVal);
+       ENABLE_REG_RMW_BUFFER(ah);
+       /* 7834, b1=1 */
+       REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+       /* 9808, b27=0 */
+       REG_CLR_BIT(ah, 0x9808, 1 << 27);
+       REG_RMW_BUFFER_FLUSH(ah);
 
+       ENABLE_REGWRITE_BUFFER(ah);
        for (i = 0; i < ARRAY_SIZE(regList); i++)
                REG_WRITE(ah, regList[i][0], regList[i][1]);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
new file mode 100644 (file)
index 0000000..1db119d
--- /dev/null
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_mci.h"
+#include "ar9003_aic.h"
+#include "ar9003_phy.h"
+#include "reg_aic.h"
+
+static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = {
+       0, 3, 9, 15, 21, 27
+};
+
+static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = {
+       8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659,
+       3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457,
+       1298, 1157, 1031, 919,  819,  730,  651,  580,
+       517,  461,  411,  366,  326,  291,  259,  231,
+       206,  183,  163,  146,  130,  116,  103,  92,
+       82,   73,   65,   58,   52,   46,   41,   37,
+       33,   29,   26,   23,   21,   18,   16,   15,
+       13,   12,   10,   9,    8,    7,    7,    6,
+       5,    5,    4,    4,    3
+};
+
+static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+       /*
+        * Disable AIC for now, until we have all the
+        * HW code and the driver-layer support ready.
+        */
+       return false;
+
+       if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC)
+               return false;
+
+       return true;
+}
+
+static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
+                                    bool dir, u8 index)
+{
+       int16_t i;
+
+       if (dir) {
+               for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+                       if (cal_sram[i].valid)
+                               break;
+               }
+       } else {
+               for (i = index - 1; i >= 0; i--) {
+                       if (cal_sram[i].valid)
+                               break;
+               }
+       }
+
+       if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0))
+               i = -1;
+
+       return i;
+}
+
+/*
+ * type 0: aic_lin_table, 1: com_att_db_table
+ */
+static int16_t ar9003_aic_find_index(u8 type, int16_t value)
+{
+       int16_t i = -1;
+
+       if (type == 0) {
+               for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) {
+                       if (aic_lin_table[i] >= value)
+                               break;
+               }
+       } else if (type == 1) {
+               for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) {
+                       if (com_att_db_table[i] > value) {
+                               i--;
+                               break;
+                       }
+               }
+
+               if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE)
+                       i = -1;
+       }
+
+       return i;
+}
+
+static void ar9003_aic_gain_table(struct ath_hw *ah)
+{
+       u32 aic_atten_word[19], i;
+
+       /* Config LNA gain difference */
+       REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00);
+       REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438);
+
+       /* Program gain table */
+       aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+               (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31,  00 dB: 4'd0, 5'd31 */
+       aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+               (0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */
+       aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+               (0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */
+       aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+               (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */
+       aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+               (0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */
+       aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+               (0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */
+       aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9  | (0x0 & 0xf) << 5 |
+               (0xf & 0x1f);  /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */
+       aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0xf & 0x1f);  /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */
+       aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0xf & 0x1f);  /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */
+       aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x0 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */
+       aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */
+       aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */
+       aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x6 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */
+       aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0x3 & 0x1f);  /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */
+       aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0x3 & 0x1f);  /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */
+       aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x0 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */
+       aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */
+       aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */
+       aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x6 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */
+
+       /* Write to Gain table with auto increment enabled. */
+       REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+                 (ATH_AIC_SRAM_AUTO_INCREMENT |
+                  ATH_AIC_SRAM_GAIN_TABLE_OFFSET));
+
+       for (i = 0; i < 19; i++) {
+               REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000),
+                         aic_atten_word[i]);
+       }
+}
+
+static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       int i;
+
+       /* Write to Gain table with auto increment enabled. */
+       REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+                 (ATH_AIC_SRAM_AUTO_INCREMENT |
+                  ATH_AIC_SRAM_CAL_OFFSET));
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0);
+               aic->aic_sram[i] = 0;
+       }
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0,
+                 (SM(0, AR_PHY_AIC_MON_ENABLE) |
+                  SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) |
+                  SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) |
+                  SM(37, AR_PHY_AIC_F_WLAN) |
+                  SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+                  SM(0, AR_PHY_AIC_CAL_ENABLE) |
+                  SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+                  SM(0, AR_PHY_AIC_ENABLE)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1,
+                 (SM(0, AR_PHY_AIC_MON_ENABLE) |
+                  SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+                  SM(0, AR_PHY_AIC_CAL_ENABLE) |
+                  SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+                  SM(0, AR_PHY_AIC_ENABLE)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0,
+                 (SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) |
+                  SM(0, AR_PHY_AIC_BT_IDLE_CFG) |
+                  SM(1, AR_PHY_AIC_STDBY_COND) |
+                  SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) |
+                  SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) |
+                  SM(15, AR_PHY_AIC_RSSI_MAX) |
+                  SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1,
+                 (SM(15, AR_PHY_AIC_RSSI_MAX) |
+                  SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0,
+                 (SM(44, AR_PHY_AIC_RADIO_DELAY) |
+                  SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) |
+                  SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) |
+                  SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) |
+                  SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) |
+                  SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) |
+                  SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) |
+                  SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0,
+                 (SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) |
+                  SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) |
+                  SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) |
+                  SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) |
+                  SM(10, AR_PHY_AIC_MON_PERF_THR) |
+                  SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) |
+                  SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) |
+                  SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0,
+                 (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+                  SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+                  SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+                  SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+                  SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1,
+                 (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+                  SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+                  SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+                  SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+                  SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+       ar9003_aic_gain_table(ah);
+
+       /* Need to enable AIC reference signal in BT modem. */
+       REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+                 (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) |
+                  ATH_AIC_BT_AIC_ENABLE));
+
+       aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32);
+
+       /* Start calibration */
+       REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+       REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET);
+       REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+
+       aic->aic_caled_chan = 0;
+       aic->aic_cal_state = AIC_CAL_STATE_STARTED;
+
+       return aic->aic_cal_state;
+}
+
+static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
+       struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+       u32 dir_path_gain_idx, quad_path_gain_idx, value;
+       u32 fixed_com_att_db;
+       int8_t dir_path_sign, quad_path_sign;
+       int16_t i;
+       bool ret = true;
+
+       memset(&cal_sram, 0, sizeof(cal_sram));
+       memset(&aic_sram, 0, sizeof(aic_sram));
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               value = aic->aic_sram[i];
+
+               cal_sram[i].valid =
+                       MS(value, AR_PHY_AIC_SRAM_VALID);
+               cal_sram[i].rot_quad_att_db =
+                       MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
+               cal_sram[i].vga_quad_sign =
+                       MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
+               cal_sram[i].rot_dir_att_db =
+                       MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
+               cal_sram[i].vga_dir_sign =
+                       MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
+               cal_sram[i].com_att_6db =
+                       MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
+
+               if (cal_sram[i].valid) {
+                       dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
+                               com_att_db_table[cal_sram[i].com_att_6db];
+                       quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
+                               com_att_db_table[cal_sram[i].com_att_6db];
+
+                       dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
+                       quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
+
+                       aic_sram[i].dir_path_gain_lin = dir_path_sign *
+                               aic_lin_table[dir_path_gain_idx];
+                       aic_sram[i].quad_path_gain_lin = quad_path_sign *
+                               aic_lin_table[quad_path_gain_idx];
+               }
+       }
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               int16_t start_idx, end_idx;
+
+               if (cal_sram[i].valid)
+                       continue;
+
+               start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
+               end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
+
+               if (start_idx < 0) {
+                       /* extrapolation */
+                       start_idx = end_idx;
+                       end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
+
+                       if (end_idx < 0) {
+                               ret = false;
+                               break;
+                       }
+
+                       aic_sram[i].dir_path_gain_lin =
+                               ((aic_sram[start_idx].dir_path_gain_lin -
+                                 aic_sram[end_idx].dir_path_gain_lin) *
+                                (start_idx - i) + ((end_idx - i) >> 1)) /
+                               (end_idx - i) +
+                               aic_sram[start_idx].dir_path_gain_lin;
+                       aic_sram[i].quad_path_gain_lin =
+                               ((aic_sram[start_idx].quad_path_gain_lin -
+                                 aic_sram[end_idx].quad_path_gain_lin) *
+                                (start_idx - i) + ((end_idx - i) >> 1)) /
+                               (end_idx - i) +
+                               aic_sram[start_idx].quad_path_gain_lin;
+               }
+
+               if (end_idx < 0) {
+                       /* extrapolation */
+                       end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
+
+                       if (end_idx < 0) {
+                               ret = false;
+                               break;
+                       }
+
+                       aic_sram[i].dir_path_gain_lin =
+                               ((aic_sram[start_idx].dir_path_gain_lin -
+                                 aic_sram[end_idx].dir_path_gain_lin) *
+                                (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+                               (start_idx - end_idx) +
+                               aic_sram[start_idx].dir_path_gain_lin;
+                       aic_sram[i].quad_path_gain_lin =
+                               ((aic_sram[start_idx].quad_path_gain_lin -
+                                 aic_sram[end_idx].quad_path_gain_lin) *
+                                (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+                               (start_idx - end_idx) +
+                               aic_sram[start_idx].quad_path_gain_lin;
+
+               } else if (start_idx >= 0) {
+                       /* interpolation */
+                       aic_sram[i].dir_path_gain_lin =
+                               (((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) +
+                                ((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) +
+                                ((end_idx - start_idx) >> 1)) /
+                               (end_idx - start_idx);
+                       aic_sram[i].quad_path_gain_lin =
+                               (((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) +
+                                ((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) +
+                                ((end_idx - start_idx) >> 1))/
+                               (end_idx - start_idx);
+               }
+       }
+
+       /* From dir/quad_path_gain_lin to sram. */
+       i = ar9003_aic_find_valid(cal_sram, 1, 0);
+       if (i < 0) {
+               i = 0;
+               ret = false;
+       }
+       fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               int16_t rot_dir_path_att_db, rot_quad_path_att_db;
+
+               aic_sram[i].sram.vga_dir_sign =
+                       (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
+               aic_sram[i].sram.vga_quad_sign =
+                       (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
+
+               rot_dir_path_att_db =
+                       ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) -
+                       fixed_com_att_db;
+               rot_quad_path_att_db =
+                       ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
+                       fixed_com_att_db;
+
+               aic_sram[i].sram.com_att_6db =
+                       ar9003_aic_find_index(1, fixed_com_att_db);
+
+               aic_sram[i].sram.valid = 1;
+
+               aic_sram[i].sram.rot_dir_att_db =
+                       min(max(rot_dir_path_att_db,
+                               (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
+                           ATH_AIC_MAX_ROT_DIR_ATT_DB);
+               aic_sram[i].sram.rot_quad_att_db =
+                       min(max(rot_quad_path_att_db,
+                               (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
+                           ATH_AIC_MAX_ROT_QUAD_ATT_DB);
+       }
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
+                                      AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
+                                   SM(aic_sram[i].sram.vga_quad_sign,
+                                      AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
+                                   SM(aic_sram[i].sram.com_att_6db,
+                                      AR_PHY_AIC_SRAM_COM_ATT_6DB) |
+                                   SM(aic_sram[i].sram.valid,
+                                      AR_PHY_AIC_SRAM_VALID) |
+                                   SM(aic_sram[i].sram.rot_dir_att_db,
+                                      AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
+                                   SM(aic_sram[i].sram.rot_quad_att_db,
+                                      AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
+       }
+
+       return ret;
+}
+
+static void ar9003_aic_cal_done(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+       /* Disable AIC reference signal in BT modem. */
+       REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+                 (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) &
+                  ~ATH_AIC_BT_AIC_ENABLE));
+
+       if (ar9003_aic_cal_post_process(ah))
+               aic->aic_cal_state = AIC_CAL_STATE_DONE;
+       else
+               aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+}
+
+static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       int i, num_chan;
+
+       num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+       if (!num_chan) {
+               aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+               return aic->aic_cal_state;
+       }
+
+       if (cal_once) {
+               for (i = 0; i < 10000; i++) {
+                       if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+                            AR_PHY_AIC_CAL_ENABLE) == 0)
+                               break;
+
+                       udelay(100);
+               }
+       }
+
+       /*
+        * Use AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE.
+        * Sometimes CAL_DONE bit is not asserted.
+        */
+       if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+            AR_PHY_AIC_CAL_ENABLE) != 0) {
+               ath_dbg(common, MCI, "AIC cal is not done after 40ms");
+               goto exit;
+       }
+
+       REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1,
+                 (ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT));
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               u32 value;
+
+               value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1);
+
+               if (value & 0x01) {
+                       if (aic->aic_sram[i] == 0)
+                               aic->aic_caled_chan++;
+
+                       aic->aic_sram[i] = value;
+
+                       if (!cal_once)
+                               break;
+               }
+       }
+
+       if ((aic->aic_caled_chan >= num_chan) || cal_once) {
+               ar9003_aic_cal_done(ah);
+       } else {
+               /* Start calibration */
+               REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+               REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1,
+                           AR_PHY_AIC_CAL_CH_VALID_RESET);
+               REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+       }
+exit:
+       return aic->aic_cal_state;
+
+}
+
+u8 ar9003_aic_calibration(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       u8 cal_ret = AIC_CAL_STATE_ERROR;
+
+       switch (aic->aic_cal_state) {
+       case AIC_CAL_STATE_IDLE:
+               cal_ret = ar9003_aic_cal_start(ah, 1);
+               break;
+       case AIC_CAL_STATE_STARTED:
+               cal_ret = ar9003_aic_cal_continue(ah, false);
+               break;
+       case AIC_CAL_STATE_DONE:
+               cal_ret = AIC_CAL_STATE_DONE;
+               break;
+       default:
+               break;
+       }
+
+       return cal_ret;
+}
+
+u8 ar9003_aic_start_normal(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       int16_t i;
+
+       if (aic->aic_cal_state != AIC_CAL_STATE_DONE)
+               return 1;
+
+       ar9003_aic_gain_table(ah);
+
+       REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT);
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               REG_WRITE(ah, AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]);
+       }
+
+       /* FIXME: Replace these with proper register names */
+       REG_WRITE(ah, 0xa6b0, 0x80);
+       REG_WRITE(ah, 0xa6b4, 0x5b2df0);
+       REG_WRITE(ah, 0xa6b8, 0x10762cc8);
+       REG_WRITE(ah, 0xa6bc, 0x1219a4b);
+       REG_WRITE(ah, 0xa6c0, 0x1e01);
+       REG_WRITE(ah, 0xb6b4, 0xf0);
+       REG_WRITE(ah, 0xb6c0, 0x1e01);
+       REG_WRITE(ah, 0xb6b0, 0x81);
+       REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000);
+
+       aic->aic_enabled = true;
+
+       return 0;
+}
+
+u8 ar9003_aic_cal_reset(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+       aic->aic_cal_state = AIC_CAL_STATE_IDLE;
+       return aic->aic_cal_state;
+}
+
+u8 ar9003_aic_calibration_single(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+       u8 cal_ret;
+       int num_chan;
+
+       num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+       (void) ar9003_aic_cal_start(ah, num_chan);
+       cal_ret = ar9003_aic_cal_continue(ah, true);
+
+       return cal_ret;
+}
+
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+       struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+       priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
new file mode 100644 (file)
index 0000000..86f4064
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_AIC_H
+#define AR9003_AIC_H
+
+#define ATH_AIC_MAX_COM_ATT_DB_TABLE    6
+#define ATH_AIC_MAX_AIC_LIN_TABLE       69
+#define ATH_AIC_MIN_ROT_DIR_ATT_DB      0
+#define ATH_AIC_MIN_ROT_QUAD_ATT_DB     0
+#define ATH_AIC_MAX_ROT_DIR_ATT_DB      37
+#define ATH_AIC_MAX_ROT_QUAD_ATT_DB     37
+#define ATH_AIC_SRAM_AUTO_INCREMENT     0x80000000
+#define ATH_AIC_SRAM_GAIN_TABLE_OFFSET  0x280
+#define ATH_AIC_SRAM_CAL_OFFSET         0x140
+#define ATH_AIC_SRAM_OFFSET             0x00
+#define ATH_AIC_MEAS_MAG_THRESH         20
+#define ATH_AIC_BT_JUPITER_CTRL         0x66820
+#define ATH_AIC_BT_AIC_ENABLE           0x02
+
+enum aic_cal_state {
+       AIC_CAL_STATE_IDLE = 0,
+       AIC_CAL_STATE_STARTED,
+       AIC_CAL_STATE_DONE,
+       AIC_CAL_STATE_ERROR
+};
+
+struct ath_aic_sram_info {
+       bool valid:1;
+       bool vga_quad_sign:1;
+       bool vga_dir_sign:1;
+       u8 rot_quad_att_db;
+       u8 rot_dir_att_db;
+       u8 com_att_6db;
+};
+
+struct ath_aic_out_info {
+       int16_t dir_path_gain_lin;
+       int16_t quad_path_gain_lin;
+       struct ath_aic_sram_info sram;
+};
+
+u8 ar9003_aic_calibration(struct ath_hw *ah);
+u8 ar9003_aic_start_normal(struct ath_hw *ah);
+u8 ar9003_aic_cal_reset(struct ath_hw *ah);
+u8 ar9003_aic_calibration_single(struct ath_hw *ah);
+
+#endif /* AR9003_AIC_H */
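
Taken together, ar9003_aic.c and this header expose a small calibration state machine (IDLE -> STARTED -> DONE/ERROR). A hedged sketch of how a btcoex caller might drive it once AIC is actually enabled, based only on the states and return values defined above (the polling bound and pacing are illustrative, and ar9003_hw_is_aic_enabled() currently keeps the feature off):

/* Illustrative polling loop: reset to IDLE, keep stepping the state
 * machine until it reports DONE or ERROR, then load the normal
 * operating tables.
 */
static int example_run_aic_cal(struct ath_hw *ah)
{
	int tries = 100;		/* illustrative bound */
	u8 state;

	ar9003_aic_cal_reset(ah);	/* back to AIC_CAL_STATE_IDLE */

	do {
		state = ar9003_aic_calibration(ah);
		if (state == AIC_CAL_STATE_ERROR)
			return -EIO;
		udelay(100);		/* illustrative pacing */
	} while (state != AIC_CAL_STATE_DONE && --tries);

	if (state != AIC_CAL_STATE_DONE)
		return -ETIMEDOUT;

	return ar9003_aic_start_normal(ah) ? -EIO : 0;
}
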
index 4335ccbe7d7e078537216301a4fdb2120d3ee7fb..79fd3b2dcbdef9fc117b99177cb44fe46a581ae4 100644 (file)
@@ -195,16 +195,16 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
                               ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
 
-               if (ah->config.no_pll_pwrsave) {
+               if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) {
                        INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
+                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
                        INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
+                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
                } else {
                        INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
                        INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
                }
        } else if (AR_SREV_9462_21(ah)) {
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -231,10 +231,20 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                               ar9462_2p1_modes_fast_clock);
                INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
                               ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9462_2p1_pciephy_clkreq_disable_L1);
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9462_2p1_pciephy_clkreq_disable_L1);
+
+               /* Awake -> Sleep Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9462_2p1_pciephy_clkreq_disable_L1);
+               }
+
+               /* Sleep -> Awake Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9462_2p1_pciephy_clkreq_disable_L1);
+               }
        } else if (AR_SREV_9462_20(ah)) {
 
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -262,11 +272,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                                ar9462_2p0_common_rx_gain);
 
                /* Awake -> Sleep Setting */
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9462_2p0_pciephy_clkreq_disable_L1);
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9462_2p0_pciephy_clkreq_disable_L1);
+               }
+
                /* Sleep -> Awake Setting */
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9462_2p0_pciephy_clkreq_disable_L1);
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9462_2p0_pciephy_clkreq_disable_L1);
+               }
 
                /* Fast clock modal settings */
                INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -456,10 +473,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                               ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
 
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9565_1p1_pciephy_clkreq_disable_L1);
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9565_1p1_pciephy_clkreq_disable_L1);
+               /* Awake -> Sleep Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9565_1p1_pciephy_clkreq_disable_L1);
+               }
+
+               /* Sleep -> Awake Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9565_1p1_pciephy_clkreq_disable_L1);
+               }
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9565_1p1_modes_fast_clock);
@@ -491,10 +517,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                               ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
 
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9565_1p0_pciephy_clkreq_disable_L1);
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9565_1p0_pciephy_clkreq_disable_L1);
+               /* Awake -> Sleep Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9565_1p0_pciephy_clkreq_disable_L1);
+               }
+
+               /* Sleep -> Awake Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9565_1p0_pciephy_clkreq_disable_L1);
+               }
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9565_1p0_modes_fast_clock);
@@ -1130,6 +1165,12 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
        struct ath_hw_ops *ops = ath9k_hw_ops(ah);
 
        ar9003_hw_init_mode_regs(ah);
+
+       if (AR_SREV_9003_PCOEM(ah)) {
+               WARN_ON(!ah->iniPcieSerdes.ia_array);
+               WARN_ON(!ah->iniPcieSerdesLowPower.ia_array);
+       }
+
        priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
        priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
        priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
@@ -1139,4 +1180,5 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
        ar9003_hw_attach_phy_ops(ah);
        ar9003_hw_attach_calib_ops(ah);
        ar9003_hw_attach_mac_ops(ah);
+       ar9003_hw_attach_aic_ops(ah);
 }
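
The SerDes INI selection above is keyed off individual bits in ah->config.pll_pwrsave instead of the old no_pll_pwrsave flag: AR9485 tests only the control bit, while the AR9462/AR9565 branches initialise the Awake->Sleep and Sleep->Awake arrays only when the matching D3/D0 bits are also set. A minimal sketch of the two conditions, using the bit names from the hw.h hunk further down; the helper names are illustrative and not part of the patch:

/* Sketch only: decode of the pll_pwrsave bitmask
 * (0 = power save off, 1 = control bit only, 7 = control plus D3 and D0). */
static bool pll_pwrsave_on_d3(struct ath_hw *ah)
{
	return (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
	       (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3);
}

static bool pll_pwrsave_on_d0(struct ath_hw *ah)
{
	return (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
	       (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0);
}

If either condition is false the corresponding iniPcieSerdes / iniPcieSerdesLowPower array is simply left unset, which is what the new WARN_ON() checks in ar9003_hw_attach_ops() flag for the PC-OEM parts.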
index bd169fae32a1b7f8bad03dcc7564a56bd4ff456f..af5ee416a560dab726805c58850cddfe65291e03 100644 (file)
@@ -19,6 +19,7 @@
 #include "hw-ops.h"
 #include "ar9003_phy.h"
 #include "ar9003_mci.h"
+#include "ar9003_aic.h"
 
 static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
 {
@@ -1016,6 +1017,9 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        if (en_int)
                ar9003_mci_enable_interrupt(ah);
 
+       if (ath9k_hw_is_aic_enabled(ah))
+               ar9003_aic_start_normal(ah);
+
        return 0;
 }
 
@@ -1362,6 +1366,22 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
                value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
                mci->need_flush_btinfo = false;
                break;
+       case MCI_STATE_AIC_CAL:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       value = ar9003_aic_calibration(ah);
+               break;
+       case MCI_STATE_AIC_START:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       ar9003_aic_start_normal(ah);
+               break;
+       case MCI_STATE_AIC_CAL_RESET:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       value = ar9003_aic_cal_reset(ah);
+               break;
+       case MCI_STATE_AIC_CAL_SINGLE:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       value = ar9003_aic_calibration_single(ah);
+               break;
        default:
                break;
        }
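
The new MCI_STATE_AIC_* cases just forward into the AIC code when ath9k_hw_is_aic_enabled() reports support, so BT-coex paths that already go through ar9003_mci_state() need no extra plumbing. A hedged example of such a caller; the wrapper name is made up and the reset-then-calibrate ordering is only an illustration:

/* Illustrative only: trigger an AIC calibration cycle through the MCI
 * state interface.  With AIC disabled the new cases fall through and
 * nothing is started. */
static u32 example_run_aic_cal(struct ath_hw *ah)
{
	ar9003_mci_state(ah, MCI_STATE_AIC_CAL_RESET);
	return ar9003_mci_state(ah, MCI_STATE_AIC_CAL);
}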
index c311b2bfdb004a68ebfef3f7ccba229b056216b3..fc595b92ac56007a024bc9cb5d871e19cef4ba9c 100644 (file)
 #define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE                0x0000ff00
 #define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S      8
 
-/* AIC Registers */
-#define AR_PHY_AIC_CTRL_0_B0   (AR_SM_BASE + 0x4b0)
-#define AR_PHY_AIC_CTRL_1_B0   (AR_SM_BASE + 0x4b4)
-#define AR_PHY_AIC_CTRL_2_B0   (AR_SM_BASE + 0x4b8)
-#define AR_PHY_AIC_CTRL_3_B0   (AR_SM_BASE + 0x4bc)
-#define AR_PHY_AIC_STAT_0_B0   (AR_SM_BASE + 0x4c4))
-#define AR_PHY_AIC_STAT_1_B0   (AR_SM_BASE + 0x4c8))
-#define AR_PHY_AIC_CTRL_4_B0   (AR_SM_BASE + 0x4c0)
-#define AR_PHY_AIC_STAT_2_B0   (AR_SM_BASE + 0x4cc)
-
 #define AR_PHY_65NM_CH0_TXRF3       0x16048
 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G         0x0000001e
 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S       1
 #define AR_PHY_TX_IQCAL_STATUS_B1   (AR_SM1_BASE + 0x48c)
 #define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i)    (AR_SM1_BASE + 0x450 + ((_i) << 2))
 
-/* SM 1 AIC Registers */
-
-#define AR_PHY_AIC_CTRL_0_B1   (AR_SM1_BASE + 0x4b0)
-#define AR_PHY_AIC_CTRL_1_B1   (AR_SM1_BASE + 0x4b4)
-#define AR_PHY_AIC_CTRL_2_B1   (AR_SM1_BASE + 0x4b8)
-#define AR_PHY_AIC_STAT_0_B1   (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
-                                       0x4c0 : 0x4c4))
-#define AR_PHY_AIC_STAT_1_B1   (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
-                                       0x4c4 : 0x4c8))
-#define AR_PHY_AIC_CTRL_4_B1   (AR_SM1_BASE + 0x4c0)
-#define AR_PHY_AIC_STAT_2_B1   (AR_SM1_BASE + 0x4cc)
-
-#define AR_PHY_AIC_SRAM_ADDR_B1        (AR_SM1_BASE + 0x5f0)
-#define AR_PHY_AIC_SRAM_DATA_B1        (AR_SM1_BASE + 0x5f4)
-
 #define AR_PHY_RTT_TABLE_SW_INTF_B(i)  (0x384 + ((i) ? \
                                        AR_SM1_BASE : AR_SM_BASE))
 #define AR_PHY_RTT_TABLE_SW_INTF_1_B(i)        (0x388 + ((i) ? \
index 934418872e8e156a641145a512280f3bee6f86bd..e4d11fa7fe8ce8d6d9a8191928cde773210dec2a 100644 (file)
@@ -106,7 +106,7 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
        int chain, i;
 
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
-               if (!(ah->rxchainmask & (1 << chain)))
+               if (!(ah->caps.rx_chainmask & (1 << chain)))
                        continue;
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
                        ar9003_hw_rtt_load_hist_entry(ah, chain, i,
@@ -171,7 +171,7 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
        int chain, i;
 
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
-               if (!(ah->rxchainmask & (1 << chain)))
+               if (!(ah->caps.rx_chainmask & (1 << chain)))
                        continue;
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
                        ah->caldata->rtt_table[chain][i] =
@@ -193,7 +193,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
        int chain, i;
 
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
-               if (!(ah->rxchainmask & (1 << chain)))
+               if (!(ah->caps.rx_chainmask & (1 << chain)))
                        continue;
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
                        ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
index 7e89236c0e13795fb1419f91af825685c0e2b0bc..a7a81b3969cec7e79b2cb73959c4d8ff1fb7489e 100644 (file)
@@ -184,12 +184,12 @@ struct ath_frame_info {
        struct ath_buf *bf;
        u16 framelen;
        s8 txq;
-       enum ath9k_key_type keytype;
        u8 keyix;
        u8 rtscts_rate;
        u8 retries : 7;
        u8 baw_tracked : 1;
        u8 tx_power;
+       enum ath9k_key_type keytype:2;
 };
 
 struct ath_rxbuf {
index cb366adc820b17a667a908d79db82da8519f3783..f50a6bc5d06ee0b2bb3c83c971cd5f1d81afd690 100644 (file)
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_buf *bf = avp->av_bcbuf;
+       struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
 
        ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
                avp->av_bslot);
 
        tasklet_disable(&sc->bcon_tasklet);
 
+       cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+
        if (bf && bf->bf_mpdu) {
                struct sk_buff *skb = bf->bf_mpdu;
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
        }
 
        if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
-               if ((vif->type != NL80211_IFTYPE_AP) ||
-                   (sc->nbcnvifs > 1)) {
+               if (vif->type != NL80211_IFTYPE_AP) {
                        ath_dbg(common, CONFIG,
                                "An AP interface is already present !\n");
                        return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
         * enabling/disabling SWBA.
         */
        if (changed & BSS_CHANGED_BEACON_ENABLED) {
-               if (!bss_conf->enable_beacon &&
-                   (sc->nbcnvifs <= 1)) {
-                       cur_conf->enable_beacon = false;
-               } else if (bss_conf->enable_beacon) {
-                       cur_conf->enable_beacon = true;
-                       ath9k_cache_beacon_config(sc, ctx, bss_conf);
+               bool enabled = cur_conf->enable_beacon;
+
+               if (!bss_conf->enable_beacon) {
+                       cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+               } else {
+                       cur_conf->enable_beacon |= BIT(avp->av_bslot);
+                       if (!enabled)
+                               ath9k_cache_beacon_config(sc, ctx, bss_conf);
                }
        }
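
cur_conf->enable_beacon is now treated as a per-slot bitmap (the ath_beacon_config change below switches it from bool to u8), so each vif only flips its own BIT(avp->av_bslot) and beaconing stays configured as long as any bit remains set. The bookkeeping in isolation, with illustrative helper names:

/* Sketch of the per-slot beacon bitmap handling introduced above. */
static void example_mark_slot_beacon(struct ath_beacon_config *conf,
				     int slot, bool enable)
{
	if (enable)
		conf->enable_beacon |= BIT(slot);
	else
		conf->enable_beacon &= ~BIT(slot);
}

static bool example_any_beacon_enabled(struct ath_beacon_config *conf)
{
	return conf->enable_beacon != 0;
}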
 
index 5fe62ff2223b4311829671327f2d9f9f189948ae..cd2f0a2373cb92f7eeb47f0fa0ab21e5a78c3a9e 100644 (file)
@@ -44,6 +44,9 @@
 
 #define AR9300_NUM_BT_WEIGHTS   4
 #define AR9300_NUM_WLAN_WEIGHTS 4
+
+#define ATH_AIC_MAX_BT_CHANNEL  79
+
 /* Defines the BT AR_BT_COEX_WGHT used */
 enum ath_stomp_type {
        ATH_BTCOEX_STOMP_ALL,
@@ -93,9 +96,18 @@ struct ath9k_hw_mci {
        u32 last_recovery;
 };
 
+struct ath9k_hw_aic {
+       bool aic_enabled;
+       u8 aic_cal_state;
+       u8 aic_caled_chan;
+       u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+       u32 aic_cal_start_time;
+};
+
 struct ath_btcoex_hw {
        enum ath_btcoex_scheme scheme;
        struct ath9k_hw_mci mci;
+       struct ath9k_hw_aic aic;
        bool enabled;
        u8 wlanactive_gpio;
        u8 btactive_gpio;
index e200a6e3aca5f4e4ce98354814358724cb0444d3..3e2e24e4843fdbf94ef4d4484a4f05ef1f3bb21a 100644 (file)
@@ -238,7 +238,6 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 {
        struct ath9k_nfcal_hist *h = NULL;
        unsigned i, j;
-       int32_t val;
        u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
        struct ath_common *common = ath9k_hw_common(ah);
        s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
@@ -246,6 +245,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
        if (ah->caldata)
                h = ah->caldata->nfCalHist;
 
+       ENABLE_REG_RMW_BUFFER(ah);
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (chainmask & (1 << i)) {
                        s16 nfval;
@@ -258,10 +258,8 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
                        else
                                nfval = default_nf;
 
-                       val = REG_READ(ah, ah->nf_regs[i]);
-                       val &= 0xFFFFFE00;
-                       val |= (((u32) nfval << 1) & 0x1ff);
-                       REG_WRITE(ah, ah->nf_regs[i], val);
+                       REG_RMW(ah, ah->nf_regs[i],
+                               (((u32) nfval << 1) & 0x1ff), 0x1ff);
                }
        }
 
@@ -274,6 +272,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
        REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
                    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        /*
         * Wait for load to complete, should be fast, a few 10s of us.
@@ -309,19 +308,17 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
         * by the median we just loaded.  This will be initial (and max) value
         * of next noise floor calibration the baseband does.
         */
-       ENABLE_REGWRITE_BUFFER(ah);
+       ENABLE_REG_RMW_BUFFER(ah);
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (chainmask & (1 << i)) {
                        if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
                                continue;
 
-                       val = REG_READ(ah, ah->nf_regs[i]);
-                       val &= 0xFFFFFE00;
-                       val |= (((u32) (-50) << 1) & 0x1ff);
-                       REG_WRITE(ah, ah->nf_regs[i], val);
+                       REG_RMW(ah, ah->nf_regs[i],
+                                       (((u32) (-50) << 1) & 0x1ff), 0x1ff);
                }
        }
-       REGWRITE_BUFFER_FLUSH(ah);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        return 0;
 }
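
The conversions in this function follow the usage pattern behind the new ENABLE_REG_RMW_BUFFER()/REG_RMW_BUFFER_FLUSH() macros (defined in the hw.h hunk below): bracket a run of read-modify-write accesses so that ath9k_htc can batch them into one WMI command instead of paying a USB round-trip per register. A short usage sketch; REG_EXAMPLE_A/B are placeholders, not real ath9k registers:

/* Sketch only: batch several RMW accesses between enable and flush. */
static void example_rmw_batch(struct ath_hw *ah)
{
	ENABLE_REG_RMW_BUFFER(ah);
	REG_RMW(ah, REG_EXAMPLE_A, 0x10, 0xff);	/* clear 0xff, then set 0x10 */
	REG_RMW(ah, REG_EXAMPLE_B, 0x01, 0x03);
	REG_RMW_BUFFER_FLUSH(ah);	/* on ath9k_htc: one batched WMI command */
}

On plain PCI devices the enable_rmw_buffer/rmw_flush ops are not installed, so the two macros do nothing and the sequence degenerates to ordinary REG_RMW() calls.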
index 2b79a568e8032c1fbc33a0fb550a5da6140f2865..d23737342f4fe7b311c84a7d556cad6e94ecf1f1 100644 (file)
@@ -54,7 +54,7 @@ struct ath_beacon_config {
        u16 dtim_period;
        u16 bmiss_timeout;
        u8 dtim_count;
-       bool enable_beacon;
+       u8 enable_beacon;
        bool ibss_creator;
        u32 nexttbtt;
        u32 intval;
index 726271c7c3306e8a9255fd645bcbe0d3c6d8fb3e..e98a9eaba7ff3f1b84a85945e63e901c8a216207 100644 (file)
@@ -126,8 +126,19 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
        DFS_STAT_INC(sc, pulses_detected);
        return true;
 }
-#undef PRI_CH_RADAR_FOUND
-#undef EXT_CH_RADAR_FOUND
+
+static void
+ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
+{
+       struct dfs_pattern_detector *pd = sc->dfs_detector;
+       DFS_STAT_INC(sc, pulses_processed);
+       if (pd == NULL)
+               return;
+       if (!pd->add_pulse(pd, pe))
+               return;
+       DFS_STAT_INC(sc, radar_detected);
+       ieee80211_radar_detected(sc->hw);
+}
 
 /*
  * DFS: check PHY-error for radar pulse and feed the detector
@@ -176,18 +187,21 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        ard.pulse_length_pri = vdata_end[-3];
        pe.freq = ah->curchan->channel;
        pe.ts = mactime;
-       if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
-               struct dfs_pattern_detector *pd = sc->dfs_detector;
-               ath_dbg(common, DFS,
-                       "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
-                       "width=%d, rssi=%d, delta_ts=%llu\n",
-                       pe.freq, pe.ts, pe.width, pe.rssi,
-                       pe.ts - sc->dfs_prev_pulse_ts);
-               sc->dfs_prev_pulse_ts = pe.ts;
-               DFS_STAT_INC(sc, pulses_processed);
-               if (pd != NULL && pd->add_pulse(pd, &pe)) {
-                       DFS_STAT_INC(sc, radar_detected);
-                       ieee80211_radar_detected(sc->hw);
-               }
+       if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
+               return;
+
+       ath_dbg(common, DFS,
+               "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
+               "width=%d, rssi=%d, delta_ts=%llu\n",
+               ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi,
+               pe.ts - sc->dfs_prev_pulse_ts);
+       sc->dfs_prev_pulse_ts = pe.ts;
+       if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
+               ath9k_dfs_process_radar_pulse(sc, &pe);
+       if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+               pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
+               ath9k_dfs_process_radar_pulse(sc, &pe);
        }
 }
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
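
Factoring the detector feed into ath9k_dfs_process_radar_pulse() lets the handler report a pulse on both halves of an HT40 channel: once for the primary 20 MHz channel and, when EXT_CH_RADAR_FOUND is set, once more for the extension channel whose centre lies 20 MHz above (HT40+) or below (HT40-) the primary. The frequency fix-up in isolation:

/* Sketch of the extension-channel centre frequency used above. */
static u16 example_ext_chan_freq(u16 primary_freq_mhz, bool ht40plus)
{
	return primary_freq_mhz + (ht40plus ? 20 : -20);
}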
index 971d770722cf239bde42cdfedc5fe0fa6c52c7dc..cc81482c934d61e6a40c3b181435348040c93cd2 100644 (file)
@@ -27,12 +27,7 @@ void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
 void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
                               u32 shift, u32 val)
 {
-       u32 regVal;
-
-       regVal = REG_READ(ah, reg) & ~mask;
-       regVal |= (val << shift) & mask;
-
-       REG_WRITE(ah, reg, regVal);
+       REG_RMW(ah, reg, ((val << shift) & mask), mask);
 
        if (ah->config.analog_shiftreg)
                udelay(100);
index e5a78d4fd66e570765a0ac5fd020be9115894009..4773da6dc6f2d7d96b8e8a51e7ca252737cfe88b 100644 (file)
@@ -389,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
                }
        }
 
+       ENABLE_REG_RMW_BUFFER(ah);
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
                      (numXpdGain - 1) & 0x3);
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
@@ -396,6 +397,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
                      xpdGainValues[1]);
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
                regChainOffset = i * 0x1000;
@@ -770,15 +772,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
                                 struct ar5416_eeprom_4k *eep,
                                 u8 txRxAttenLocal)
 {
-       REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0,
-                 pModal->antCtrlChain[0]);
+       ENABLE_REG_RMW_BUFFER(ah);
+       REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
+               pModal->antCtrlChain[0], 0);
 
-       REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0),
-                 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
-                  ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
-                    AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
-                 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
-                 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+       REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
+               SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+               SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
+               AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
 
        if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
            AR5416_EEP_MINOR_VER_3) {
@@ -817,6 +818,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
                      AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
        REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
                      AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
+       REG_RMW_BUFFER_FLUSH(ah);
 }
 
 /*
@@ -928,6 +930,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                }
        }
 
+       ENABLE_REG_RMW_BUFFER(ah);
        if (AR_SREV_9271(ah)) {
                ath9k_hw_analog_shift_rmw(ah,
                                          AR9285_AN_RF2G3,
@@ -1032,18 +1035,19 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                                          AR9285_AN_RF2G4_DB2_4_S,
                                          db2[4]);
        }
+       REG_RMW_BUFFER_FLUSH(ah);
 
-
+       ENABLE_REG_RMW_BUFFER(ah);
        REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
                      pModal->switchSettling);
        REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
                      pModal->adcDesiredSize);
 
-       REG_WRITE(ah, AR_PHY_RF_CTL4,
-                 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
-                 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
-                 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON)  |
-                 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+       REG_RMW(ah, AR_PHY_RF_CTL4,
+               SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
+               SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
+               SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON)  |
+               SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0);
 
        REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
                      pModal->txEndToRxOn);
@@ -1072,6 +1076,8 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                                      pModal->swSettleHt40);
        }
 
+       REG_RMW_BUFFER_FLUSH(ah);
+
        bb_desired_scale = (pModal->bb_scale_smrt_antenna &
                        EEP_4K_BB_DESIRED_SCALE_MASK);
        if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
@@ -1080,6 +1086,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
                pwrctrl = mask * bb_desired_scale;
                clr = mask * 0x1f;
+               ENABLE_REG_RMW_BUFFER(ah);
                REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
                REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
                REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
@@ -1094,6 +1101,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                clr = mask * 0x1f;
                REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
                REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
+               REG_RMW_BUFFER_FLUSH(ah);
        }
 }
 
index 098059039351fb065cafcb252a5c91b1e071d43b..056f516bf017629e4be0ced579140acca5355868 100644 (file)
@@ -466,6 +466,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                                  struct ar5416_eeprom_def *eep,
                                  u8 txRxAttenLocal, int regChainOffset, int i)
 {
+       ENABLE_REG_RMW_BUFFER(ah);
        if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
                txRxAttenLocal = pModal->txRxAttenCh[i];
 
@@ -483,16 +484,12 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                              AR_PHY_GAIN_2GHZ_XATTEN2_DB,
                              pModal->xatten2Db[i]);
                } else {
-                       REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
-                         (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
-                          ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
-                         | SM(pModal-> bswMargin[i],
-                              AR_PHY_GAIN_2GHZ_BSW_MARGIN));
-                       REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
-                         (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
-                          ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
-                         | SM(pModal->bswAtten[i],
-                              AR_PHY_GAIN_2GHZ_BSW_ATTEN));
+                       REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+                               SM(pModal->bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN),
+                               AR_PHY_GAIN_2GHZ_BSW_MARGIN);
+                       REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+                               SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN),
+                               AR_PHY_GAIN_2GHZ_BSW_ATTEN);
                }
        }
 
@@ -504,17 +501,14 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                      AR_PHY_RXGAIN + regChainOffset,
                      AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
        } else {
-               REG_WRITE(ah,
-                         AR_PHY_RXGAIN + regChainOffset,
-                         (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
-                          ~AR_PHY_RXGAIN_TXRX_ATTEN)
-                         | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
-               REG_WRITE(ah,
-                         AR_PHY_GAIN_2GHZ + regChainOffset,
-                         (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
-                          ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
-                         SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
+               REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset,
+                       SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN),
+                       AR_PHY_RXGAIN_TXRX_ATTEN);
+               REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+                       SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN),
+                       AR_PHY_GAIN_2GHZ_RXTX_MARGIN);
        }
+       REG_RMW_BUFFER_FLUSH(ah);
 }
 
 static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
index 86d46c196966f0e65b3c963011b124318f592f8b..284706798c71deda9ecc8a400bcff6e26b8999c5 100644 (file)
@@ -69,9 +69,15 @@ void ath_fill_led_pin(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
 
-       if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
+       if (AR_SREV_9100(ah))
                return;
 
+       if (ah->led_pin >= 0) {
+               if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
+                       ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
+               return;
+       }
+
        if (AR_SREV_9287(ah))
                ah->led_pin = ATH_LED_PIN_9287;
        else if (AR_SREV_9485(sc->sc_ah))
index 300d3671d0ef820dc99cb5c202cd7945437a13b6..e82a0d4ce23f99247ea540be2a00dd4c26f9bd90 100644 (file)
@@ -444,6 +444,10 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 #define OP_BT_SCAN                 BIT(4)
 #define OP_TSF_RESET               BIT(6)
 
+enum htc_op_flags {
+       HTC_FWFLAG_NO_RMW,
+};
+
 struct ath9k_htc_priv {
        struct device *dev;
        struct ieee80211_hw *hw;
@@ -482,6 +486,7 @@ struct ath9k_htc_priv {
        bool reconfig_beacon;
        unsigned int rxfilter;
        unsigned long op_flags;
+       unsigned long fw_flags;
 
        struct ath9k_hw_cal_data caldata;
        struct ath_spec_scan_priv spec_priv;
index fd229409f6762249061f1215a5c4e635de3ec9ae..d7beefe60683df8bd22b134e6a4418d9f19e5bbd 100644 (file)
@@ -376,17 +376,139 @@ static void ath9k_regwrite_flush(void *hw_priv)
        mutex_unlock(&priv->wmi->multi_write_mutex);
 }
 
-static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+static void ath9k_reg_rmw_buffer(void *hw_priv,
+                                u32 reg_offset, u32 set, u32 clr)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       u32 rsp_status;
+       int r;
+
+       mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+       /* Store the register/value */
+       priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg =
+               cpu_to_be32(reg_offset);
+       priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set =
+               cpu_to_be32(set);
+       priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr =
+               cpu_to_be32(clr);
+
+       priv->wmi->multi_rmw_idx++;
+
+       /* If the buffer is full, send it out. */
+       if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) {
+               r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+                         (u8 *) &priv->wmi->multi_rmw,
+                         sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
+                         (u8 *) &rsp_status, sizeof(rsp_status),
+                         100);
+               if (unlikely(r)) {
+                       ath_dbg(common, WMI,
+                               "REGISTER RMW FAILED, multi len: %d\n",
+                               priv->wmi->multi_rmw_idx);
+               }
+               priv->wmi->multi_rmw_idx = 0;
+       }
+
+       mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_reg_rmw_flush(void *hw_priv)
 {
-       u32 val;
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       u32 rsp_status;
+       int r;
+
+       if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+               return;
+
+       atomic_dec(&priv->wmi->m_rmw_cnt);
 
-       val = ath9k_regread(hw_priv, reg_offset);
-       val &= ~clr;
-       val |= set;
-       ath9k_regwrite(hw_priv, val, reg_offset);
+       mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+       if (priv->wmi->multi_rmw_idx) {
+               r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+                         (u8 *) &priv->wmi->multi_rmw,
+                         sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
+                         (u8 *) &rsp_status, sizeof(rsp_status),
+                         100);
+               if (unlikely(r)) {
+                       ath_dbg(common, WMI,
+                               "REGISTER RMW FAILED, multi len: %d\n",
+                               priv->wmi->multi_rmw_idx);
+               }
+               priv->wmi->multi_rmw_idx = 0;
+       }
+
+       mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_enable_rmw_buffer(void *hw_priv)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+       if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+               return;
+
+       atomic_inc(&priv->wmi->m_rmw_cnt);
+}
+
+static u32 ath9k_reg_rmw_single(void *hw_priv,
+                                u32 reg_offset, u32 set, u32 clr)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       struct register_rmw buf, buf_ret;
+       int ret;
+       u32 val = 0;
+
+       buf.reg = cpu_to_be32(reg_offset);
+       buf.set = cpu_to_be32(set);
+       buf.clr = cpu_to_be32(clr);
+
+       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+                         (u8 *) &buf, sizeof(buf),
+                         (u8 *) &buf_ret, sizeof(buf_ret),
+                         100);
+       if (unlikely(ret)) {
+               ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n",
+                       reg_offset, ret);
+       }
        return val;
 }
 
+static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+       if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
+               u32 val;
+
+               val = REG_READ(ah, reg_offset);
+               val &= ~clr;
+               val |= set;
+               REG_WRITE(ah, reg_offset, val);
+
+               return 0;
+       }
+
+       if (atomic_read(&priv->wmi->m_rmw_cnt))
+               ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr);
+       else
+               ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr);
+
+       return 0;
+}
+
 static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
 {
        *csz = L1_CACHE_BYTES >> 2;
@@ -501,6 +623,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
        ah->reg_ops.write = ath9k_regwrite;
        ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
        ah->reg_ops.write_flush = ath9k_regwrite_flush;
+       ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer;
+       ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        priv->ah = ah;
 
@@ -686,6 +810,12 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
                return -EINVAL;
        }
 
+       if (priv->fw_version_major == 1 && priv->fw_version_minor < 4)
+               set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags);
+
+       dev_info(priv->dev, "FW RMW support: %s\n",
+               test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On");
+
        return 0;
 }
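
Whether any RMW batching happens at all is decided once at firmware-probe time: firmware older than 1.4 does not implement WMI_REG_RMW_CMDID, so HTC_FWFLAG_NO_RMW is set and ath9k_reg_rmw() falls back to an ordinary read/modify/write built from the separate register read and write commands. The capability gate, reduced to a sketch with an illustrative helper name:

/* Illustrative reduction of the firmware version check above:
 * WMI_REG_RMW_CMDID is only understood by firmware 1.4 and newer. */
static bool example_fw_has_reg_rmw(u16 major, u16 minor)
{
	return !(major == 1 && minor < 4);
}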
 
index 88769b64b20b29d9f9cb71c6ffbca27cc3d314d4..232339b0554020f0875b8bc4bb9d255b66413e61 100644 (file)
@@ -108,6 +108,14 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
                ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
 }
 
+static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah)
+{
+       if (ath9k_hw_private_ops(ah)->is_aic_enabled)
+               return ath9k_hw_private_ops(ah)->is_aic_enabled(ah);
+
+       return false;
+}
+
 #endif
 
 /* Private hardware call ops */
index 60aa8d71e753fa936909dcc98ef915c2a2208f60..5e15e8e10ed39f0b605783176fe260faa387d083 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/time.h>
 #include <linux/bitops.h>
 #include <linux/etherdevice.h>
+#include <linux/gpio.h>
 #include <asm/unaligned.h>
 
 #include "hw.h"
@@ -121,6 +122,36 @@ void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
        REGWRITE_BUFFER_FLUSH(ah);
 }
 
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size)
+{
+       u32 *tmp_reg_list, *tmp_data;
+       int i;
+
+       tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
+       if (!tmp_reg_list) {
+               dev_err(ah->dev, "%s: tmp_reg_list: alloc failed\n", __func__);
+               return;
+       }
+
+       tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
+       if (!tmp_data) {
+               dev_err(ah->dev, "%s: tmp_data: alloc failed\n", __func__);
+               goto error_tmp_data;
+       }
+
+       for (i = 0; i < size; i++)
+               tmp_reg_list[i] = array[i][0];
+
+       REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size);
+
+       for (i = 0; i < size; i++)
+               array[i][1] = tmp_data[i];
+
+       kfree(tmp_data);
+error_tmp_data:
+       kfree(tmp_reg_list);
+}
+
 u32 ath9k_hw_reverse_bits(u32 val, u32 n)
 {
        u32 retval;
@@ -366,6 +397,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
                ah->config.rimt_first = 700;
        }
 
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+               ah->config.pll_pwrsave = 7;
+
        /*
         * We need this for PCI devices only (Cardbus, PCI, miniPCI)
         * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
@@ -424,7 +458,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
        ah->power_mode = ATH9K_PM_UNDEFINED;
        ah->htc_reset_init = true;
 
-       ah->tpc_enabled = true;
+       ah->tpc_enabled = false;
 
        ah->ani_function = ATH9K_ANI_ALL;
        if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -1197,6 +1231,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
        u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
        u32 set = AR_STA_ID1_KSRCH_MODE;
 
+       ENABLE_REG_RMW_BUFFER(ah);
        switch (opmode) {
        case NL80211_IFTYPE_ADHOC:
                if (!AR_SREV_9340_13(ah)) {
@@ -1218,6 +1253,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
                break;
        }
        REG_RMW(ah, AR_STA_ID1, set, mask);
+       REG_RMW_BUFFER_FLUSH(ah);
 }
 
 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1930,6 +1966,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (!ath9k_hw_mci_is_enabled(ah))
                REG_WRITE(ah, AR_OBS, 8);
 
+       ENABLE_REG_RMW_BUFFER(ah);
        if (ah->config.rx_intr_mitigation) {
                REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
                REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
@@ -1939,6 +1976,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
                REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
        }
+       REG_RMW_BUFFER_FLUSH(ah);
 
        ath9k_hw_init_bb(ah, chan);
 
@@ -2674,11 +2712,23 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
        if (AR_SREV_9271(ah))
                val = ~val;
 
-       REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
-               AR_GPIO_BIT(gpio));
+       if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
+               REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
+                       AR_GPIO_BIT(gpio));
+       else
+               gpio_set_value(gpio, val & 1);
 }
 EXPORT_SYMBOL(ath9k_hw_set_gpio);
 
+void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
+{
+       if (gpio >= ah->caps.num_gpio_pins)
+               return;
+
+       gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
+}
+EXPORT_SYMBOL(ath9k_hw_request_gpio);
+
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
 {
        REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
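
ath9k_hw_set_gpio() now only drives AR_GPIO_IN_OUT for pins the MAC actually owns; AR_GPIO_OE_OUT_MASK limits that to the first four pins on AR9550 and later, and anything outside the mask is routed through gpiolib instead, which is why ath9k_hw_request_gpio() and the LED-pin request in ath_fill_led_pin() were added. The routing decision in isolation (sketch, not driver code; like the other register macros, AR_GPIO_OE_OUT_MASK expects a local named ah):

/* Sketch: MAC-owned pins go through AR_GPIO_IN_OUT, the rest through
 * the generic GPIO layer. */
static bool example_pin_is_mac_owned(struct ath_hw *ah, u32 gpio)
{
	return !!((1 << gpio) & AR_GPIO_OE_OUT_MASK);
}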
index 29a25d92add7453161d51eca5c6bb99b802328f3..c1d2d0340febadb445bed891754f0565025f8f09 100644 (file)
                        (_ah)->reg_ops.write_flush((_ah));      \
        } while (0)
 
+#define ENABLE_REG_RMW_BUFFER(_ah)                                     \
+       do {                                                            \
+               if ((_ah)->reg_ops.enable_rmw_buffer)   \
+                       (_ah)->reg_ops.enable_rmw_buffer((_ah)); \
+       } while (0)
+
+#define REG_RMW_BUFFER_FLUSH(_ah)                                      \
+       do {                                                            \
+               if ((_ah)->reg_ops.rmw_flush)           \
+                       (_ah)->reg_ops.rmw_flush((_ah));        \
+       } while (0)
+
 #define PR_EEP(_s, _val)                                               \
        do {                                                            \
                len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
 
 #define REG_WRITE_ARRAY(iniarray, column, regWr) \
        ath9k_hw_write_array(ah, iniarray, column, &(regWr))
+#define REG_READ_ARRAY(ah, array, size) \
+       ath9k_hw_read_array(ah, array, size)
 
 #define AR_GPIO_OUTPUT_MUX_AS_OUTPUT             0
 #define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
@@ -309,6 +323,12 @@ enum ath9k_hw_hang_checks {
        HW_MAC_HANG               = BIT(5),
 };
 
+#define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0)
+#define AR_PCIE_PLL_PWRSAVE_ON_D3   BIT(1)
+#define AR_PCIE_PLL_PWRSAVE_ON_D0   BIT(2)
+#define AR_PCIE_CDR_PWRSAVE_ON_D3   BIT(3)
+#define AR_PCIE_CDR_PWRSAVE_ON_D0   BIT(4)
+
 struct ath9k_ops_config {
        int dma_beacon_response_time;
        int sw_beacon_response_time;
@@ -335,7 +355,7 @@ struct ath9k_ops_config {
        u32 ant_ctrl_comm2g_switch_enable;
        bool xatten_margin_cfg;
        bool alt_mingainidx;
-       bool no_pll_pwrsave;
+       u8 pll_pwrsave;
        bool tx_gain_buffalo;
        bool led_active_high;
 };
@@ -647,6 +667,10 @@ struct ath_hw_private_ops {
 
        /* ANI */
        void (*ani_cache_ini_regs)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       bool (*is_aic_enabled)(struct ath_hw *ah);
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 };
 
 /**
@@ -1000,6 +1024,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
 void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
                         u32 ah_signal_type);
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
+void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
 
 /* General Operation */
@@ -1008,6 +1033,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
 void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
                          int column, unsigned int *writecnt);
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size);
 u32 ath9k_hw_reverse_bits(u32 val, u32 n);
 u16 ath9k_hw_computetxtime(struct ath_hw *ah,
                           u8 phy, int kbps,
@@ -1117,6 +1143,7 @@ void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us);
 void ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah);
 static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
 {
        return ah->btcoex_hw.enabled;
@@ -1134,6 +1161,9 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
        return ah->btcoex_hw.scheme;
 }
 #else
+static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+}
 static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
 {
        return false;
index 6c6e88495394e1897edb4db3768bd6de8aee24e7..f8d11efa7b0f1fd7fef0da1fce74ea4ada23f0ee 100644 (file)
@@ -141,6 +141,16 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
        return val;
 }
 
+static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
+                                u32 *val, u16 count)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               val[i] = ath9k_ioread32(hw_priv, addr[i]);
+}
+
+
 static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
                                    u32 set, u32 clr)
 {
@@ -437,8 +447,15 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
                ath_info(common, "Enable WAR for ASPM D3/L1\n");
        }
 
+       /*
+        * The default value of pll_pwrsave is 1.
+        * For certain AR9485 cards, it is set to 0.
+        * For AR9462, AR9565 it's set to 7.
+        * For AR9462 and AR9565 it is set to 7.
+       ah->config.pll_pwrsave = 1;
+
        if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
-               ah->config.no_pll_pwrsave = true;
+               ah->config.pll_pwrsave = 0;
                ath_info(common, "Disable PLL PowerSave\n");
        }
 
@@ -530,6 +547,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->reg_ops.read = ath9k_ioread32;
+       ah->reg_ops.multi_read = ath9k_multi_ioread32;
        ah->reg_ops.write = ath9k_iowrite32;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        pCap = &ah->caps;
@@ -763,7 +781,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
                .num_different_channels = 1,
                .beacon_int_infra_match = true,
                .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
-                                       BIT(NL80211_CHAN_WIDTH_20),
+                                       BIT(NL80211_CHAN_WIDTH_20) |
+                                       BIT(NL80211_CHAN_WIDTH_40),
        }
 #endif
 };
index 1234399a43dd78692a52507a78185729a219a329..caba54ddad25c3dc4d7abd932e48602c4c885b94 100644 (file)
 
 #define AR_SREV_9550(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
+#define AR_SREV_9550_OR_LATER(_ah) \
+       (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9550))
 
 #define AR_SREV_9580(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
@@ -1128,6 +1130,8 @@ enum {
 
 #define AR_GPIO_OE_OUT                           (AR_SREV_9340(ah) ? 0x4030 : \
                                                  (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
+#define AR_GPIO_OE_OUT_MASK                     (AR_SREV_9550_OR_LATER(ah) ? \
+                                                 0x0000000F : 0xFFFFFFFF)
 #define AR_GPIO_OE_OUT_DRV                       0x3
 #define AR_GPIO_OE_OUT_DRV_NO                    0x0
 #define AR_GPIO_OE_OUT_DRV_LOW                   0x1
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
new file mode 100644 (file)
index 0000000..955147a
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_AIC_H
+#define REG_AIC_H
+
+#define AR_SM_BASE                              0xa200
+#define AR_SM1_BASE                             0xb200
+#define AR_AGC_BASE                             0x9e00
+
+#define AR_PHY_AIC_CTRL_0_B0                    (AR_SM_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B0                    (AR_SM_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_2_B0                    (AR_SM_BASE + 0x4b8)
+#define AR_PHY_AIC_CTRL_3_B0                    (AR_SM_BASE + 0x4bc)
+#define AR_PHY_AIC_CTRL_4_B0                    (AR_SM_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B0                    (AR_SM_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B0                    (AR_SM_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B0                    (AR_SM_BASE + 0x4cc)
+
+#define AR_PHY_AIC_CTRL_0_B1                    (AR_SM1_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B1                    (AR_SM1_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_4_B1                    (AR_SM1_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B1                    (AR_SM1_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B1                    (AR_SM1_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B1                    (AR_SM1_BASE + 0x4cc)
+
+#define AR_PHY_AIC_SRAM_ADDR_B0                 (AR_SM_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B0                 (AR_SM_BASE + 0x5f4)
+
+#define AR_PHY_AIC_SRAM_ADDR_B1                 (AR_SM1_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B1                 (AR_SM1_BASE + 0x5f4)
+
+#define AR_PHY_BT_COEX_4                        (AR_AGC_BASE + 0x60)
+#define AR_PHY_BT_COEX_5                        (AR_AGC_BASE + 0x64)
+
+/* AIC fields */
+#define AR_PHY_AIC_MON_ENABLE                   0x80000000
+#define AR_PHY_AIC_MON_ENABLE_S                 31
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT            0x7F000000
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S          24
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT          0x00FE0000
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S        17
+#define AR_PHY_AIC_F_WLAN                       0x0001FC00
+#define AR_PHY_AIC_F_WLAN_S                     10
+#define AR_PHY_AIC_CAL_CH_VALID_RESET           0x00000200
+#define AR_PHY_AIC_CAL_CH_VALID_RESET_S         9
+#define AR_PHY_AIC_CAL_ENABLE                   0x00000100
+#define AR_PHY_AIC_CAL_ENABLE_S                 8
+#define AR_PHY_AIC_BTTX_PWR_THR                 0x000000FE
+#define AR_PHY_AIC_BTTX_PWR_THR_S               1
+#define AR_PHY_AIC_ENABLE                       0x00000001
+#define AR_PHY_AIC_ENABLE_S                     0
+#define AR_PHY_AIC_CAL_BT_REF_DELAY             0x00F00000
+#define AR_PHY_AIC_CAL_BT_REF_DELAY_S           20
+#define AR_PHY_AIC_BT_IDLE_CFG                  0x00080000
+#define AR_PHY_AIC_BT_IDLE_CFG_S                19
+#define AR_PHY_AIC_STDBY_COND                   0x00060000
+#define AR_PHY_AIC_STDBY_COND_S                 17
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB             0x0001F800
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S           11
+#define AR_PHY_AIC_STDBY_COM_ATT_DB             0x00000700
+#define AR_PHY_AIC_STDBY_COM_ATT_DB_S           8
+#define AR_PHY_AIC_RSSI_MAX                     0x000000F0
+#define AR_PHY_AIC_RSSI_MAX_S                   4
+#define AR_PHY_AIC_RSSI_MIN                     0x0000000F
+#define AR_PHY_AIC_RSSI_MIN_S                   0
+#define AR_PHY_AIC_RADIO_DELAY                  0x7F000000
+#define AR_PHY_AIC_RADIO_DELAY_S                24
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR           0x00F00000
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S         20
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR             0x000F8000
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S           15
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR        0x00006000
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S      13
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX            0x00001C00
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S          10
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE             0x00000200
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S           9
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX         0x00000100
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S       8
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING           0x000000FF
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S         0
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT            0x07F00000
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S          20
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT          0x000FE000
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S        13
+#define AR_PHY_AIC_MON_PWR_EST_LONG             0x00001000
+#define AR_PHY_AIC_MON_PWR_EST_LONG_S           12
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING         0x00000C00
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S       10
+#define AR_PHY_AIC_MON_PERF_THR                 0x000003E0
+#define AR_PHY_AIC_MON_PERF_THR_S               5
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING       0x00000018
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S     3
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR        0x00000006
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S      1
+#define AR_PHY_AIC_CAL_PWR_EST_LONG             0x00000001
+#define AR_PHY_AIC_CAL_PWR_EST_LONG_S           0
+#define AR_PHY_AIC_MON_DONE                     0x80000000
+#define AR_PHY_AIC_MON_DONE_S                   31
+#define AR_PHY_AIC_MON_ACTIVE                   0x40000000
+#define AR_PHY_AIC_MON_ACTIVE_S                 30
+#define AR_PHY_AIC_MEAS_COUNT                   0x3F000000
+#define AR_PHY_AIC_MEAS_COUNT_S                 24
+#define AR_PHY_AIC_CAL_ANT_ISO_EST              0x00FC0000
+#define AR_PHY_AIC_CAL_ANT_ISO_EST_S            18
+#define AR_PHY_AIC_CAL_HOP_COUNT                0x0003F800
+#define AR_PHY_AIC_CAL_HOP_COUNT_S              11
+#define AR_PHY_AIC_CAL_VALID_COUNT              0x000007F0
+#define AR_PHY_AIC_CAL_VALID_COUNT_S            4
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR          0x00000008
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S        3
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR        0x00000004
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S      2
+#define AR_PHY_AIC_CAL_DONE                     0x00000002
+#define AR_PHY_AIC_CAL_DONE_S                   1
+#define AR_PHY_AIC_CAL_ACTIVE                   0x00000001
+#define AR_PHY_AIC_CAL_ACTIVE_S                 0
+
+#define AR_PHY_AIC_MEAS_MAG_MIN                 0xFFC00000
+#define AR_PHY_AIC_MEAS_MAG_MIN_S               22
+#define AR_PHY_AIC_MON_STALE_COUNT              0x003F8000
+#define AR_PHY_AIC_MON_STALE_COUNT_S            15
+#define AR_PHY_AIC_MON_HOP_COUNT                0x00007F00
+#define AR_PHY_AIC_MON_HOP_COUNT_S              8
+#define AR_PHY_AIC_CAL_AIC_SM                   0x000000F8
+#define AR_PHY_AIC_CAL_AIC_SM_S                 3
+#define AR_PHY_AIC_SM                           0x00000007
+#define AR_PHY_AIC_SM_S                         0
+#define AR_PHY_AIC_SRAM_VALID                   0x00000001
+#define AR_PHY_AIC_SRAM_VALID_S                 0
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB         0x0000007E
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S       1
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN           0x00000080
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S         7
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB          0x00003F00
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S        8
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN            0x00004000
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S          14
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB             0x00038000
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S           15
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO       0x0000E000
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S     13
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO       0x00001E00
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S     9
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING     0x000001F8
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S   3
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF       0x00000006
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S     1
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED         0x00000001
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S       0
+
+#endif /* REG_AIC_H */
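
As elsewhere in ath9k, every field above is a mask plus a matching _S shift, so values are packed with SM() (as in the EEPROM hunks earlier) and unpacked with the plain shift-and-mask idiom. A sketch of decoding one AIC SRAM word into the struct ath_aic_sram_info from ar9003_aic.h; whether the hardware lays the word out exactly this way is up to the AIC code, the point here is only the mask/shift convention:

/* Sketch: unpack an AIC SRAM word with the masks defined above. */
static void example_decode_aic_sram(u32 word, struct ath_aic_sram_info *info)
{
	info->valid = !!(word & AR_PHY_AIC_SRAM_VALID);
	info->rot_quad_att_db = (word & AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB) >>
				AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S;
	info->vga_quad_sign = !!(word & AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
	info->rot_dir_att_db = (word & AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) >>
			       AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S;
	info->vga_dir_sign = !!(word & AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
	info->com_att_6db = (word & AR_PHY_AIC_SRAM_COM_ATT_6DB) >>
			    AR_PHY_AIC_SRAM_COM_ATT_6DB_S;
}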
index 65c8894c5f81040361fbdd5da9aa7823e6d38900..ca533b4321bddc9ee7621ca4e3dbbe5f03377236 100644 (file)
@@ -61,6 +61,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
                return "WMI_REG_READ_CMDID";
        case WMI_REG_WRITE_CMDID:
                return "WMI_REG_WRITE_CMDID";
+       case WMI_REG_RMW_CMDID:
+               return "WMI_REG_RMW_CMDID";
        case WMI_RC_STATE_CHANGE_CMDID:
                return "WMI_RC_STATE_CHANGE_CMDID";
        case WMI_RC_RATE_UPDATE_CMDID:
@@ -101,6 +103,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
        spin_lock_init(&wmi->event_lock);
        mutex_init(&wmi->op_mutex);
        mutex_init(&wmi->multi_write_mutex);
+       mutex_init(&wmi->multi_rmw_mutex);
        init_completion(&wmi->cmd_wait);
        INIT_LIST_HEAD(&wmi->pending_tx_events);
        tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
@@ -224,7 +227,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
 
        /* Check if there has been a timeout. */
        spin_lock(&wmi->wmi_lock);
-       if (cmd_id != wmi->last_cmd_id) {
+       if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
                spin_unlock(&wmi->wmi_lock);
                goto free_skb;
        }
@@ -272,11 +275,16 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
                               enum wmi_cmd_id cmd, u16 len)
 {
        struct wmi_cmd_hdr *hdr;
+       unsigned long flags;
 
        hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
        hdr->command_id = cpu_to_be16(cmd);
        hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
 
+       spin_lock_irqsave(&wmi->wmi_lock, flags);
+       wmi->last_seq_id = wmi->tx_seq_id;
+       spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
        return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
 }
 
@@ -292,7 +300,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
        struct sk_buff *skb;
        u8 *data;
        int time_left, ret = 0;
-       unsigned long flags;
 
        if (ah->ah_flags & AH_UNPLUGGED)
                return 0;
@@ -320,10 +327,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
        wmi->cmd_rsp_buf = rsp_buf;
        wmi->cmd_rsp_len = rsp_len;
 
-       spin_lock_irqsave(&wmi->wmi_lock, flags);
-       wmi->last_cmd_id = cmd_id;
-       spin_unlock_irqrestore(&wmi->wmi_lock, flags);
-
        ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
        if (ret)
                goto out;
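
The ath9k hunks above switch HTC WMI response matching from the command ID to a per-command sequence number, recorded under wmi_lock at the moment the command header is stamped, so a late reply to a timed-out command can no longer be mistaken for the current one. A standalone sketch of that idea follows; the structure and helper names are invented for illustration and are not the driver's API.

/* Sequence-number matching for command/response pairs (illustrative only). */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cmd_channel {
	pthread_mutex_t lock;
	uint16_t tx_seq;   /* stamped into every outgoing command */
	uint16_t last_seq; /* the reply we are still waiting for */
};

static uint16_t issue_cmd(struct cmd_channel *c)
{
	uint16_t seq;

	pthread_mutex_lock(&c->lock);
	seq = ++c->tx_seq;   /* header sequence number */
	c->last_seq = seq;   /* record it before the reply can race us */
	pthread_mutex_unlock(&c->lock);
	return seq;
}

static bool accept_reply(struct cmd_channel *c, uint16_t seq)
{
	bool ok;

	pthread_mutex_lock(&c->lock);
	ok = (seq == c->last_seq); /* stale replies are simply dropped */
	pthread_mutex_unlock(&c->lock);
	return ok;
}

int main(void)
{
	struct cmd_channel c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint16_t first = issue_cmd(&c);  /* assume this one timed out */
	uint16_t second = issue_cmd(&c);

	printf("late reply %u accepted: %d\n", (unsigned)first,
	       accept_reply(&c, first));
	printf("reply %u accepted: %d\n", (unsigned)second,
	       accept_reply(&c, second));
	return 0;
}
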
index 0db37f230018ee2692ef4fcbff7cf3f0b3135e8e..380175d5ecd7a7d54e22d27fb434efa65dfcdd10 100644 (file)
@@ -112,6 +112,7 @@ enum wmi_cmd_id {
        WMI_TX_STATS_CMDID,
        WMI_RX_STATS_CMDID,
        WMI_BITRATE_MASK_CMDID,
+       WMI_REG_RMW_CMDID,
 };
 
 enum wmi_event_id {
@@ -125,12 +126,19 @@ enum wmi_event_id {
 };
 
 #define MAX_CMD_NUMBER 62
+#define MAX_RMW_CMD_NUMBER 15
 
 struct register_write {
        __be32 reg;
        __be32 val;
 };
 
+struct register_rmw {
+       __be32 reg;
+       __be32 set;
+       __be32 clr;
+} __packed;
+
 struct ath9k_htc_tx_event {
        int count;
        struct __wmi_event_txstatus txs;
@@ -143,7 +151,7 @@ struct wmi {
        enum htc_endpoint_id ctrl_epid;
        struct mutex op_mutex;
        struct completion cmd_wait;
-       enum wmi_cmd_id last_cmd_id;
+       u16 last_seq_id;
        struct sk_buff_head wmi_event_queue;
        struct tasklet_struct wmi_event_tasklet;
        u16 tx_seq_id;
@@ -156,10 +164,18 @@ struct wmi {
 
        spinlock_t wmi_lock;
 
+       /* multi write section */
        atomic_t mwrite_cnt;
        struct register_write multi_write[MAX_CMD_NUMBER];
        u32 multi_write_idx;
        struct mutex multi_write_mutex;
+
+       /* multi rmw section */
+       atomic_t m_rmw_cnt;
+       struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER];
+       u32 multi_rmw_idx;
+       struct mutex multi_rmw_mutex;
+
 };
 
 struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
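
Each register_rmw entry above pairs a register address with set and clear masks, letting the host queue up to MAX_RMW_CMD_NUMBER operations under multi_rmw_mutex and flush them as a single WMI_REG_RMW_CMDID transfer. Below is a hedged sketch of the usual set/clear semantics (clear first, then set); the apply helper is an assumption for illustration, not firmware code.

/* Applying one queued read-modify-write entry (illustrative semantics). */
#include <stdint.h>
#include <stdio.h>

struct rmw_entry {
	uint32_t reg; /* register offset */
	uint32_t set; /* bits to set */
	uint32_t clr; /* bits to clear */
};

static uint32_t apply_rmw(uint32_t val, const struct rmw_entry *e)
{
	return (val & ~e->clr) | e->set; /* assumed order: clear, then set */
}

int main(void)
{
	struct rmw_entry e = { .reg = 0x9800, .set = 0x01, .clr = 0xf0 };
	uint32_t before = 0xde;
	uint32_t after = apply_rmw(before, &e);

	printf("reg 0x%x: 0x%02x -> 0x%02x\n", e.reg, before, after);
	return 0;
}
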
index 3d57f877238921b979fea04c7f038f48bb0abcc9..c657ca26a71a7c8e2d75dd931ce22d87a372dd45 100644 (file)
@@ -289,7 +289,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
                                "count=%d, count_false=%d\n",
                                event->freq, pd->rs->type_id,
                                ps->pri, ps->count, ps->count_falses);
-                       channel_detector_reset(dpd, cd);
+                       pd->reset(pd, dpd->last_pulse_ts);
                        return true;
                }
        }
index 47d14db59b939b7bd18631f89ed730a676b2307a..b97172667bc7b3c5c3ea3463c9b6573811a6d064 100644 (file)
@@ -14,6 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include "wil6210.h"
 #include "wmi.h"
 
@@ -217,7 +218,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
        if (cid < 0)
                return -ENOENT;
 
-       memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
+       ether_addr_copy(mac, wil->sta[cid].addr);
        wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
 
        rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -478,8 +479,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        }
        conn.channel = ch - 1;
 
-       memcpy(conn.bssid, bss->bssid, ETH_ALEN);
-       memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
+       ether_addr_copy(conn.bssid, bss->bssid);
+       ether_addr_copy(conn.dst_mac, bss->bssid);
 
        set_bit(wil_status_fwconnecting, wil->status);
 
@@ -782,8 +783,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
                           channel->hw_value);
        if (rc)
-               netif_carrier_off(ndev);
+               goto err_pcp_start;
 
+       rc = wil_bcast_init(wil);
+       if (rc)
+               goto err_bcast;
+
+       goto out; /* success */
+err_bcast:
+       wmi_pcp_stop(wil);
+err_pcp_start:
+       netif_carrier_off(ndev);
 out:
        mutex_unlock(&wil->mutex);
        return rc;
@@ -917,6 +927,21 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_cfg80211_change_bss(struct wiphy *wiphy,
+                                  struct net_device *dev,
+                                  struct bss_parameters *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       if (params->ap_isolate >= 0) {
+               wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+                            wil->ap_isolate, params->ap_isolate);
+               wil->ap_isolate = params->ap_isolate;
+       }
+
+       return 0;
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
        .scan = wil_cfg80211_scan,
        .connect = wil_cfg80211_connect,
@@ -937,6 +962,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
        .stop_ap = wil_cfg80211_stop_ap,
        .del_station = wil_cfg80211_del_station,
        .probe_client = wil_cfg80211_probe_client,
+       .change_bss = wil_cfg80211_change_bss,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
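
wil_cfg80211_start_ap() now unwinds in reverse order when a later step fails: a failed wil_bcast_init() stops the PCP it just started before the carrier is turned off, instead of leaving the device half-configured. The generic shape of that staged-rollback goto pattern, with placeholder step names:

/* Staged setup with reverse-order unwind (step names are placeholders). */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; } /* pretend the second step fails */
static void undo_a(void) { puts("undo step a"); }

static int start_thing(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto err_a;

	rc = step_b();
	if (rc)
		goto err_b;

	return 0; /* success: nothing to unwind */

err_b:
	undo_a(); /* roll back only what already succeeded */
err_a:
	puts("report failure to caller");
	return rc;
}

int main(void)
{
	return start_thing() ? 1 : 0;
}
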
index 3830cc20d4fa525f87a9c72f3391c8ce76501a9b..bbc22d88f78f27dadd73ea64134f5484ca9df497 100644 (file)
@@ -121,12 +121,18 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
 
                        snprintf(name, sizeof(name), "tx_%2d", i);
 
-                       seq_printf(s,
-                               "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n",
-                               wil->sta[cid].addr, cid, tid,
-                               txdata->agg_wsize, txdata->agg_timeout,
-                               txdata->agg_amsdu ? "+" : "-",
-                               used, avail, sidle);
+                       if (cid < WIL6210_MAX_CID)
+                               seq_printf(s,
+                                          "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+                                          wil->sta[cid].addr, cid, tid,
+                                          txdata->agg_wsize,
+                                          txdata->agg_timeout,
+                                          txdata->agg_amsdu ? "+" : "-",
+                                          used, avail, sidle);
+                       else
+                               seq_printf(s,
+                                          "\nBroadcast [%3d|%3d] idle %s\n",
+                                          used, avail, sidle);
 
                        wil_print_vring(s, wil, name, vring, '_', 'H');
                }
@@ -1405,6 +1411,7 @@ static const struct dbg_off dbg_wil_off[] = {
        WIL_FIELD(fw_version,   S_IRUGO,                doff_u32),
        WIL_FIELD(hw_version,   S_IRUGO,                doff_x32),
        WIL_FIELD(recovery_count, S_IRUGO,              doff_u32),
+       WIL_FIELD(ap_isolate,   S_IRUGO,                doff_u32),
        {},
 };
 
index db74e811f5c424667bae5e136984750298fc92dd..c2a238426425462c7ff40f61c8e98fec9dadca6d 100644 (file)
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(mtu_max, " Max MTU value.");
 
 static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
 static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
 
 static int ring_order_set(const char *val, const struct kernel_param *kp)
 {
@@ -216,6 +217,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
+               wil_bcast_fini(wil);
                netif_tx_stop_all_queues(ndev);
                netif_carrier_off(ndev);
 
@@ -360,6 +362,35 @@ static int wil_find_free_vring(struct wil6210_priv *wil)
        return -EINVAL;
 }
 
+int wil_bcast_init(struct wil6210_priv *wil)
+{
+       int ri = wil->bcast_vring, rc;
+
+       if ((ri >= 0) && wil->vring_tx[ri].va)
+               return 0;
+
+       ri = wil_find_free_vring(wil);
+       if (ri < 0)
+               return ri;
+
+       rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
+       if (rc == 0)
+               wil->bcast_vring = ri;
+
+       return rc;
+}
+
+void wil_bcast_fini(struct wil6210_priv *wil)
+{
+       int ri = wil->bcast_vring;
+
+       if (ri < 0)
+               return;
+
+       wil->bcast_vring = -1;
+       wil_vring_fini_tx(wil, ri);
+}
+
 static void wil_connect_worker(struct work_struct *work)
 {
        int rc;
@@ -407,6 +438,7 @@ int wil_priv_init(struct wil6210_priv *wil)
        init_completion(&wil->wmi_call);
 
        wil->pending_connect_cid = -1;
+       wil->bcast_vring = -1;
        setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
        setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
 
@@ -656,6 +688,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
+       wil_bcast_fini(wil);
 
        /* prevent NAPI from being scheduled */
        bitmap_zero(wil->status, wil_status_last);
@@ -714,6 +747,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
        /* init after reset */
        wil->pending_connect_cid = -1;
+       wil->ap_isolate = 0;
        reinit_completion(&wil->wmi_ready);
        reinit_completion(&wil->wmi_call);
 
@@ -723,6 +757,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
                /* we just started MAC, wait for FW ready */
                rc = wil_wait_for_fw_ready(wil);
+               if (rc == 0) /* check FW is responsive */
+                       rc = wmi_echo(wil);
        }
 
        return rc;
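
wil_bcast_init() above allocates the broadcast ring lazily: bcast_vring stays at the -1 sentinel until the first successful allocation, and the ring is sized as 1 << bcast_ring_order (order 7 by default, i.e. 128 descriptors, against 1 << 10 = 1024 for the data rings). A small model of that sentinel-plus-order convention; the helper names are invented.

/* Lazy allocation guarded by a -1 sentinel, sized by a power-of-two order. */
#include <stdio.h>

#define BCAST_RING_ORDER 7 /* 1 << 7 = 128 entries */

static int bcast_ring = -1; /* -1 means "not allocated yet" */

static int alloc_ring(unsigned int entries)
{
	printf("allocating ring with %u entries\n", entries);
	return 3; /* pretend slot 3 was free */
}

static int bcast_init(void)
{
	if (bcast_ring >= 0) /* already set up: nothing to do */
		return 0;

	bcast_ring = alloc_ring(1u << BCAST_RING_ORDER);
	return bcast_ring < 0 ? bcast_ring : 0;
}

static void bcast_fini(void)
{
	if (bcast_ring < 0)
		return;
	printf("freeing ring %d\n", bcast_ring);
	bcast_ring = -1; /* back to the sentinel */
}

int main(void)
{
	bcast_init();
	bcast_init(); /* second call is a no-op */
	bcast_fini();
	return 0;
}
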
index ace30c1b5c64133210e88597c183b71e9aaa5e25..f2f7ea29558e058f27d7f934ba36769db36306c2 100644 (file)
@@ -82,7 +82,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
        wil_rx_handle(wil, &quota);
        done = budget - quota;
 
-       if (done <= 1) { /* burst ends - only one packet processed */
+       if (done < budget) {
                napi_complete(napi);
                wil6210_unmask_irq_rx(wil);
                wil_dbg_txrx(wil, "NAPI RX complete\n");
@@ -110,7 +110,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
                tx_done += wil_tx_complete(wil, i);
        }
 
-       if (tx_done <= 1) { /* burst ends - only one packet processed */
+       if (tx_done < budget) {
                napi_complete(napi);
                wil6210_unmask_irq_tx(wil);
                wil_dbg_txrx(wil, "NAPI TX complete\n");
index 25343cffe229e08fc45f0f582202f3f9ff6bce07..109986114abfa10d6bd6083fa52fb29de2912fea 100644 (file)
@@ -246,8 +246,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        wil6210_debugfs_init(wil);
 
-       /* check FW is alive */
-       wmi_echo(wil);
 
        return 0;
 
index 7f2f560b86382827276cc66215aa6d83116a7fc7..e8bd512d81a9b7bbd70b7d56e65e0133e39881b0 100644 (file)
@@ -33,6 +33,15 @@ module_param(rtap_include_phy_info, bool, S_IRUGO);
 MODULE_PARM_DESC(rtap_include_phy_info,
                 " Include PHY info in the radiotap header, default - no");
 
+bool rx_align_2;
+module_param(rx_align_2, bool, S_IRUGO);
+MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+
+static inline uint wil_rx_snaplen(void)
+{
+       return rx_align_2 ? 6 : 0;
+}
+
 static inline int wil_vring_is_empty(struct vring *vring)
 {
        return vring->swhead == vring->swtail;
@@ -209,7 +218,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
                               u32 i, int headroom)
 {
        struct device *dev = wil_to_dev(wil);
-       unsigned int sz = mtu_max + ETH_HLEN;
+       unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
        struct vring_rx_desc dd, *d = &dd;
        volatile struct vring_rx_desc *_d = &vring->va[i].rx;
        dma_addr_t pa;
@@ -365,10 +374,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        struct vring_rx_desc *d;
        struct sk_buff *skb;
        dma_addr_t pa;
-       unsigned int sz = mtu_max + ETH_HLEN;
+       unsigned int snaplen = wil_rx_snaplen();
+       unsigned int sz = mtu_max + ETH_HLEN + snaplen;
        u16 dmalen;
        u8 ftype;
        int cid;
+       int i = (int)vring->swhead;
        struct wil_net_stats *stats;
 
        BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
@@ -376,24 +387,28 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        if (unlikely(wil_vring_is_empty(vring)))
                return NULL;
 
-       _d = &vring->va[vring->swhead].rx;
+       _d = &vring->va[i].rx;
        if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
                /* it is not error, we just reached end of Rx done area */
                return NULL;
        }
 
-       skb = vring->ctx[vring->swhead].skb;
+       skb = vring->ctx[i].skb;
+       vring->ctx[i].skb = NULL;
+       wil_vring_advance_head(vring, 1);
+       if (!skb) {
+               wil_err(wil, "No Rx skb at [%d]\n", i);
+               return NULL;
+       }
        d = wil_skb_rxdesc(skb);
        *d = *_d;
        pa = wil_desc_addr(&d->dma.addr);
-       vring->ctx[vring->swhead].skb = NULL;
-       wil_vring_advance_head(vring, 1);
 
        dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
        dmalen = le16_to_cpu(d->dma.length);
 
-       trace_wil6210_rx(vring->swhead, d);
-       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
+       trace_wil6210_rx(i, d);
+       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
@@ -433,7 +448,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                return NULL;
        }
 
-       if (unlikely(skb->len < ETH_HLEN)) {
+       if (unlikely(skb->len < ETH_HLEN + snaplen)) {
                wil_err(wil, "Short frame, len = %d\n", skb->len);
                /* TODO: process it (i.e. BAR) */
                kfree_skb(skb);
@@ -455,6 +470,17 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                 */
        }
 
+       if (snaplen) {
+               /* Packet layout
+                * +-------+-------+---------+------------+------+
+                * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
+                * +-------+-------+---------+------------+------+
+                * Need to remove SNAP, shifting SA and DA forward
+                */
+               memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
+               skb_pull(skb, snaplen);
+       }
+
        return skb;
 }
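
When rx_align_2 is set the hardware keeps a 6-byte SNAP field between the address fields and the EtherType (see the layout comment above), so the receive path shifts the two addresses forward by snaplen and then pulls snaplen bytes from the head, leaving a plain Ethernet header. The same buffer arithmetic on a flat byte array, as a standalone check rather than driver code:

/* memmove + head pull that strips a 6-byte field after the addresses. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	/* addr1(6) addr2(6) SNAP(6) ethertype(2) payload(2) */
	unsigned char frame[] = {
		1, 1, 1, 1, 1, 1,          /* first address      */
		2, 2, 2, 2, 2, 2,          /* second address     */
		0xaa, 0xaa, 0x03, 0, 0, 0, /* SNAP, to be dropped */
		0x08, 0x00,                /* EtherType          */
		0xde, 0xad                 /* payload            */
	};
	size_t snaplen = 6;
	size_t len = sizeof(frame);
	unsigned char *data = frame;
	size_t i;

	/* shift both 6-byte address fields forward over the SNAP bytes */
	memmove(data + snaplen, data, 2 * ETH_ALEN);
	/* "pull" the duplicated head: advance the data pointer */
	data += snaplen;
	len -= snaplen;

	for (i = 0; i < len; i++)
		printf("%02x%c", data[i], (i + 1) % 6 ? ' ' : '\n');
	printf("\n");
	return 0;
}
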
 
@@ -492,17 +518,71 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
  */
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 {
-       gro_result_t rc;
+       gro_result_t rc = GRO_NORMAL;
        struct wil6210_priv *wil = ndev_to_wil(ndev);
+       struct wireless_dev *wdev = wil_to_wdev(wil);
        unsigned int len = skb->len;
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
-       int cid = wil_rxdesc_cid(d);
+       int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+       struct ethhdr *eth = (void *)skb->data;
+       /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
+        * is not suitable, need to look at data
+        */
+       int mcast = is_multicast_ether_addr(eth->h_dest);
        struct wil_net_stats *stats = &wil->sta[cid].stats;
+       struct sk_buff *xmit_skb = NULL;
+       static const char * const gro_res_str[] = {
+               [GRO_MERGED]            = "GRO_MERGED",
+               [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
+               [GRO_HELD]              = "GRO_HELD",
+               [GRO_NORMAL]            = "GRO_NORMAL",
+               [GRO_DROP]              = "GRO_DROP",
+       };
 
        skb_orphan(skb);
 
-       rc = napi_gro_receive(&wil->napi_rx, skb);
+       if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
+               if (mcast) {
+                       /* send multicast frames both to higher layers in
+                        * local net stack and back to the wireless medium
+                        */
+                       xmit_skb = skb_copy(skb, GFP_ATOMIC);
+               } else {
+                       int xmit_cid = wil_find_cid(wil, eth->h_dest);
+
+                       if (xmit_cid >= 0) {
+                               /* The destination station is associated to
+                                * this AP (in this VLAN), so send the frame
+                                * directly to it and do not pass it to local
+                                * net stack.
+                                */
+                               xmit_skb = skb;
+                               skb = NULL;
+                       }
+               }
+       }
+       if (xmit_skb) {
+               /* Send to wireless media and increase priority by 256 to
+                * keep the received priority instead of reclassifying
+                * the frame (see cfg80211_classify8021d).
+                */
+               xmit_skb->dev = ndev;
+               xmit_skb->priority += 256;
+               xmit_skb->protocol = htons(ETH_P_802_3);
+               skb_reset_network_header(xmit_skb);
+               skb_reset_mac_header(xmit_skb);
+               wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
+               dev_queue_xmit(xmit_skb);
+       }
 
+       if (skb) { /* deliver to local stack */
+
+               skb->protocol = eth_type_trans(skb, ndev);
+               rc = napi_gro_receive(&wil->napi_rx, skb);
+               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
+                            len, gro_res_str[rc]);
+       }
+       /* statistics. rc set to GRO_NORMAL for AP bridging */
        if (unlikely(rc == GRO_DROP)) {
                ndev->stats.rx_dropped++;
                stats->rx_dropped++;
@@ -512,17 +592,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                stats->rx_packets++;
                ndev->stats.rx_bytes += len;
                stats->rx_bytes += len;
-       }
-       {
-               static const char * const gro_res_str[] = {
-                       [GRO_MERGED]            = "GRO_MERGED",
-                       [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
-                       [GRO_HELD]              = "GRO_HELD",
-                       [GRO_NORMAL]            = "GRO_NORMAL",
-                       [GRO_DROP]              = "GRO_DROP",
-               };
-               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
-                            len, gro_res_str[rc]);
+               if (mcast)
+                       ndev->stats.multicast++;
        }
 }
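
With ap_isolate clear, the AP receive path above behaves like classic AP bridging: a multicast frame is copied so one instance goes back out over the air and one up the local stack, while a unicast frame addressed to another associated station is redirected entirely and never reaches the stack. The decision reduced to a standalone sketch; the helper and its inputs are invented stand-ins for the driver's lookups.

/* Rx-to-Tx bridging decision for an AP interface (illustrative). */
#include <stdbool.h>
#include <stdio.h>

enum rx_action { DELIVER_LOCAL, BRIDGE_ONLY, DELIVER_AND_BRIDGE };

static enum rx_action classify_rx(bool is_ap, bool ap_isolate,
				  bool is_mcast, bool dest_is_local_sta)
{
	if (!is_ap || ap_isolate)
		return DELIVER_LOCAL;      /* no intra-BSS forwarding */
	if (is_mcast)
		return DELIVER_AND_BRIDGE; /* copy: stack + back to the air */
	if (dest_is_local_sta)
		return BRIDGE_ONLY;        /* unicast peer: re-transmit only */
	return DELIVER_LOCAL;
}

int main(void)
{
	printf("mcast=%d peer=%d isolated=%d\n",
	       classify_rx(true, false, true, false),  /* 2: deliver + bridge */
	       classify_rx(true, false, false, true),  /* 1: bridge only      */
	       classify_rx(true, true, false, true));  /* 0: deliver locally  */
	return 0;
}
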
 
@@ -553,7 +624,6 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
                        skb->protocol = htons(ETH_P_802_2);
                        wil_netif_rx_any(skb, ndev);
                } else {
-                       skb->protocol = eth_type_trans(skb, ndev);
                        wil_rx_reorder(wil, skb);
                }
        }
@@ -679,6 +749,72 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
        return rc;
 }
 
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
+{
+       int rc;
+       struct wmi_bcast_vring_cfg_cmd cmd = {
+               .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+               .vring_cfg = {
+                       .tx_sw_ring = {
+                               .max_mpdu_size =
+                                       cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+                               .ring_size = cpu_to_le16(size),
+                       },
+                       .ringid = id,
+                       .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+               },
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_vring_cfg_done_event cmd;
+       } __packed reply;
+       struct vring *vring = &wil->vring_tx[id];
+       struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+
+       wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+                    cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+
+       if (vring->va) {
+               wil_err(wil, "Tx ring [%d] already allocated\n", id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       memset(txdata, 0, sizeof(*txdata));
+       spin_lock_init(&txdata->lock);
+       vring->size = size;
+       rc = wil_vring_alloc(wil, vring);
+       if (rc)
+               goto out;
+
+       wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+       wil->vring2cid_tid[id][1] = 0; /* TID */
+
+       cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+       rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+                     WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+       if (rc)
+               goto out_free;
+
+       if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "Tx config failed, status 0x%02x\n",
+                       reply.cmd.status);
+               rc = -EINVAL;
+               goto out_free;
+       }
+       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+       txdata->enabled = 1;
+
+       return 0;
+ out_free:
+       wil_vring_free(wil, vring, 1);
+ out:
+
+       return rc;
+}
+
 void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
 {
        struct vring *vring = &wil->vring_tx[id];
@@ -702,7 +838,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
        memset(txdata, 0, sizeof(*txdata));
 }
 
-static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
 {
        int i;
@@ -735,15 +871,6 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
        return NULL;
 }
 
-static void wil_set_da_for_vring(struct wil6210_priv *wil,
-                                struct sk_buff *skb, int vring_index)
-{
-       struct ethhdr *eth = (void *)skb->data;
-       int cid = wil->vring2cid_tid[vring_index][0];
-
-       memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
-}
-
 static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        struct sk_buff *skb);
 
@@ -764,6 +891,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
                        continue;
 
                cid = wil->vring2cid_tid[i][0];
+               if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
+
                if (!wil->sta[cid].data_port_open &&
                    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
                        break;
@@ -778,17 +908,51 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
        return NULL;
 }
 
-/*
- * Find 1-st vring and return it; set dest address for this vring in skb
- * duplicate skb and send it to other active vrings
+/* Use one of 2 strategies:
+ *
+ * 1. New (real broadcast):
+ *    use dedicated broadcast vring
+ * 2. Old (pseudo-DMS):
+ *    Find 1-st vring and return it;
+ *    duplicate skb and send it to other active vrings;
+ *    in all cases override dest address to unicast peer's address
+ * Use old strategy when new is not supported yet:
+ *  - for PBSS
+ *  - for secure link
  */
-static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
-                                 struct sk_buff *skb)
+static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+                                        struct sk_buff *skb)
+{
+       struct vring *v;
+       int i = wil->bcast_vring;
+
+       if (i < 0)
+               return NULL;
+       v = &wil->vring_tx[i];
+       if (!v->va)
+               return NULL;
+
+       return v;
+}
+
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+                                struct sk_buff *skb, int vring_index)
+{
+       struct ethhdr *eth = (void *)skb->data;
+       int cid = wil->vring2cid_tid[vring_index][0];
+
+       ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
+}
+
+static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+                                        struct sk_buff *skb)
 {
        struct vring *v, *v2;
        struct sk_buff *skb2;
        int i;
        u8 cid;
+       struct ethhdr *eth = (void *)skb->data;
+       char *src = eth->h_source;
 
        /* find 1-st vring eligible for data */
        for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
@@ -797,9 +961,15 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
                        continue;
 
                cid = wil->vring2cid_tid[i][0];
+               if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
                if (!wil->sta[cid].data_port_open)
                        continue;
 
+               /* don't Tx back to source when re-routing Rx->Tx at the AP */
+               if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+                       continue;
+
                goto found;
        }
 
@@ -817,9 +987,14 @@ found:
                if (!v2->va)
                        continue;
                cid = wil->vring2cid_tid[i][0];
+               if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
                if (!wil->sta[cid].data_port_open)
                        continue;
 
+               if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+                       continue;
+
                skb2 = skb_copy(skb, GFP_ATOMIC);
                if (skb2) {
                        wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
@@ -833,6 +1008,20 @@ found:
        return v;
 }
 
+static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
+                                      struct sk_buff *skb)
+{
+       struct wireless_dev *wdev = wil->wdev;
+
+       if (wdev->iftype != NL80211_IFTYPE_AP)
+               return wil_find_tx_bcast_2(wil, skb);
+
+       if (wil->privacy)
+               return wil_find_tx_bcast_2(wil, skb);
+
+       return wil_find_tx_bcast_1(wil, skb);
+}
+
 static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
                           int vring_index)
 {
@@ -925,6 +1114,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        uint i = swhead;
        dma_addr_t pa;
        int used;
+       bool mcast = (vring_index == wil->bcast_vring);
+       uint len = skb_headlen(skb);
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
 
@@ -950,7 +1141,17 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                return -EINVAL;
        vring->ctx[i].mapped_as = wil_mapped_as_single;
        /* 1-st segment */
-       wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
+       wil_tx_desc_map(d, pa, len, vring_index);
+       if (unlikely(mcast)) {
+               d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
+               if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
+                       /* set MCS 1 */
+                       d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
+                       /* packet mode 2 */
+                       d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
+                                      (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
+               }
+       }
        /* Process TCP/UDP checksum offloading */
        if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
                wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
@@ -1056,6 +1257,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct ethhdr *eth = (void *)skb->data;
+       bool bcast = is_multicast_ether_addr(eth->h_dest);
        struct vring *vring;
        static bool pr_once_fw;
        int rc;
@@ -1083,10 +1285,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                /* in STA mode (ESS), all to same VRING */
                vring = wil_find_tx_vring_sta(wil, skb);
        } else { /* direct communication, find matching VRING */
-               if (is_unicast_ether_addr(eth->h_dest))
-                       vring = wil_find_tx_vring(wil, skb);
-               else
-                       vring = wil_tx_bcast(wil, skb);
+               vring = bcast ? wil_find_tx_bcast(wil, skb) :
+                               wil_find_tx_ucast(wil, skb);
        }
        if (unlikely(!vring)) {
                wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1149,7 +1349,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
        struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
        int done = 0;
        int cid = wil->vring2cid_tid[ringid][0];
-       struct wil_net_stats *stats = &wil->sta[cid].stats;
+       struct wil_net_stats *stats = NULL;
        volatile struct vring_tx_desc *_d;
        int used_before_complete;
        int used_new;
@@ -1168,6 +1368,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 
        used_before_complete = wil_vring_used_tx(vring);
 
+       if (cid < WIL6210_MAX_CID)
+               stats = &wil->sta[cid].stats;
+
        while (!wil_vring_is_empty(vring)) {
                int new_swtail;
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@@ -1209,12 +1412,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                        if (skb) {
                                if (likely(d->dma.error == 0)) {
                                        ndev->stats.tx_packets++;
-                                       stats->tx_packets++;
                                        ndev->stats.tx_bytes += skb->len;
-                                       stats->tx_bytes += skb->len;
+                                       if (stats) {
+                                               stats->tx_packets++;
+                                               stats->tx_bytes += skb->len;
+                                       }
                                } else {
                                        ndev->stats.tx_errors++;
-                                       stats->tx_errors++;
+                                       if (stats)
+                                               stats->tx_errors++;
                                }
                                wil_consume_skb(skb, d->dma.error == 0);
                        }
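
The wil_tx_complete() change tolerates rings whose CID slot holds the WIL6210_MAX_CID sentinel (the new broadcast ring): the netdev totals are always updated, but the per-station counters are touched only when a real station is attached. A simplified sketch of that guard; the structures are stand-ins, not the driver's.

/* Per-station stats only when a station exists behind the ring. */
#include <stdio.h>

#define MAX_CID 8 /* sentinel value used for the broadcast ring */

struct counters { unsigned long packets, bytes; };

static struct counters netdev_stats;
static struct counters sta_stats[MAX_CID];

static void account_tx(int cid, unsigned int len)
{
	struct counters *sta = (cid < MAX_CID) ? &sta_stats[cid] : NULL;

	netdev_stats.packets++; /* device totals are always counted */
	netdev_stats.bytes += len;
	if (sta) {              /* broadcast ring has no station */
		sta->packets++;
		sta->bytes += len;
	}
}

int main(void)
{
	account_tx(2, 1500);     /* unicast ring bound to CID 2 */
	account_tx(MAX_CID, 60); /* broadcast ring, sentinel CID */
	printf("dev: %lu pkts, sta2: %lu pkts\n",
	       netdev_stats.packets, sta_stats[2].packets);
	return 0;
}
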
index b6e65c37d410eccfb93ed51e56b52b3fdc192b8b..4310972c9e1687b5b12dc4c101c9076932d907a8 100644 (file)
@@ -28,6 +28,7 @@ extern unsigned int mtu_max;
 extern unsigned short rx_ring_overflow_thrsh;
 extern int agg_wsize;
 extern u32 vring_idle_trsh;
+extern bool rx_align_2;
 
 #define WIL_NAME "wil6210"
 #define WIL_FW_NAME "wil6210.fw" /* code */
@@ -49,6 +50,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL_TX_Q_LEN_DEFAULT           (4000)
 #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
 #define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT      (7)
+#define WIL_BCAST_MCS0_LIMIT           (1024) /* limit for MCS0 frame size */
 /* limit ring size in range [32..32k] */
 #define WIL_RING_SIZE_ORDER_MIN        (5)
 #define WIL_RING_SIZE_ORDER_MAX        (15)
@@ -542,6 +545,7 @@ struct wil6210_priv {
        u32 monitor_flags;
        u32 privacy; /* secure connection? */
        int sinfo_gen;
+       u32 ap_isolate; /* no intra-BSS communication */
        /* interrupt moderation */
        u32 tx_max_burst_duration;
        u32 tx_interframe_timeout;
@@ -593,6 +597,7 @@ struct wil6210_priv {
        struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
        u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
        struct wil_sta_info sta[WIL6210_MAX_CID];
+       int bcast_vring;
        /* scan */
        struct cfg80211_scan_request *scan_request;
 
@@ -755,6 +760,9 @@ void wil_rx_fini(struct wil6210_priv *wil);
 int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                      int cid, int tid);
 void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
+int wil_bcast_init(struct wil6210_priv *wil);
+void wil_bcast_fini(struct wil6210_priv *wil);
 
 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 int wil_tx_complete(struct wil6210_priv *wil, int ringid);
index 0213135249137d873627c35c3bd102c954bdfd62..9fe2085be2c5b86d77e9346f003c7f38656ff3d3 100644 (file)
@@ -466,7 +466,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
        /* FIXME FW can transmit only ucast frames to peer */
        /* FIXME real ring_id instead of hard coded 0 */
-       memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
+       ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
        wil->sta[evt->cid].status = wil_sta_conn_pending;
 
        wil->pending_connect_cid = evt->cid;
@@ -524,8 +524,8 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
        }
 
        eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
-       memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
-       memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+       ether_addr_copy(eth->h_dest, ndev->dev_addr);
+       ether_addr_copy(eth->h_source, evt->src_mac);
        eth->h_proto = cpu_to_be16(ETH_P_PAE);
        memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
        skb->protocol = eth_type_trans(skb, ndev);
@@ -851,7 +851,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
 {
        struct wmi_set_mac_address_cmd cmd;
 
-       memcpy(cmd.mac, addr, ETH_ALEN);
+       ether_addr_copy(cmd.mac, addr);
 
        wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
 
@@ -1109,6 +1109,11 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
                 */
                cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
        }
+
+       if (rx_align_2)
+               cmd.l2_802_3_offload_ctrl |=
+                               L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
+
        /* typical time for secure PCP is 840ms */
        rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
                      WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
@@ -1157,7 +1162,8 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
        struct wmi_disconnect_sta_cmd cmd = {
                .disconnect_reason = cpu_to_le16(reason),
        };
-       memcpy(cmd.dst_mac, mac, ETH_ALEN);
+
+       ether_addr_copy(cmd.dst_mac, mac);
 
        wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
 
index 8a4af613e191856d9db24bb4525b52302358abf5..b2905531535039f6ae91835ee592a2960f140f83 100644 (file)
@@ -70,7 +70,6 @@ enum wmi_command_id {
        WMI_SET_UCODE_IDLE_CMDID        = 0x0813,
        WMI_SET_WORK_MODE_CMDID         = 0x0815,
        WMI_LO_LEAKAGE_CALIB_CMDID      = 0x0816,
-       WMI_MARLON_R_ACTIVATE_CMDID     = 0x0817,
        WMI_MARLON_R_READ_CMDID         = 0x0818,
        WMI_MARLON_R_WRITE_CMDID        = 0x0819,
        WMI_MARLON_R_TXRX_SEL_CMDID     = 0x081a,
@@ -80,6 +79,7 @@ enum wmi_command_id {
        WMI_RF_RX_TEST_CMDID            = 0x081e,
        WMI_CFG_RX_CHAIN_CMDID          = 0x0820,
        WMI_VRING_CFG_CMDID             = 0x0821,
+       WMI_BCAST_VRING_CFG_CMDID       = 0x0822,
        WMI_VRING_BA_EN_CMDID           = 0x0823,
        WMI_VRING_BA_DIS_CMDID          = 0x0824,
        WMI_RCP_ADDBA_RESP_CMDID        = 0x0825,
@@ -99,6 +99,7 @@ enum wmi_command_id {
        WMI_BF_TXSS_MGMT_CMDID          = 0x0837,
        WMI_BF_SM_MGMT_CMDID            = 0x0838,
        WMI_BF_RXSS_MGMT_CMDID          = 0x0839,
+       WMI_BF_TRIG_CMDID               = 0x083A,
        WMI_SET_SECTORS_CMDID           = 0x0849,
        WMI_MAINTAIN_PAUSE_CMDID        = 0x0850,
        WMI_MAINTAIN_RESUME_CMDID       = 0x0851,
@@ -595,6 +596,22 @@ struct wmi_vring_cfg_cmd {
        struct wmi_vring_cfg vring_cfg;
 } __packed;
 
+/*
+ * WMI_BCAST_VRING_CFG_CMDID
+ */
+struct wmi_bcast_vring_cfg {
+       struct wmi_sw_ring_cfg tx_sw_ring;
+       u8 ringid;                              /* 0-23 vrings */
+       u8 encap_trans_type;
+       u8 ds_cfg;                              /* 802.3 DS cfg */
+       u8 nwifi_ds_trans_type;
+} __packed;
+
+struct wmi_bcast_vring_cfg_cmd {
+       __le32 action;
+       struct wmi_bcast_vring_cfg vring_cfg;
+} __packed;
+
 /*
  * WMI_VRING_BA_EN_CMDID
  */
@@ -687,6 +704,9 @@ struct wmi_cfg_rx_chain_cmd {
        #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
        #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
        #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
        u8 l2_802_3_offload_ctrl;
 
        #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
@@ -841,7 +861,6 @@ enum wmi_event_id {
        WMI_IQ_RX_CALIB_DONE_EVENTID            = 0x1812,
        WMI_SET_WORK_MODE_DONE_EVENTID          = 0x1815,
        WMI_LO_LEAKAGE_CALIB_DONE_EVENTID       = 0x1816,
-       WMI_MARLON_R_ACTIVATE_DONE_EVENTID      = 0x1817,
        WMI_MARLON_R_READ_DONE_EVENTID          = 0x1818,
        WMI_MARLON_R_WRITE_DONE_EVENTID         = 0x1819,
        WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181a,
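
The wil6210 WMI header describes every bit-field with a POS/LEN/MSK triple, so the new SNAP_KEEP flag is enabled simply by OR-ing its MSK into l2_802_3_offload_ctrl, which is what the wmi_rx_chain_add() hunk earlier does when rx_align_2 is set. A small check of how the three macros relate; the field values are copied from the header, the rest is illustrative.

/* POS/LEN/MSK convention: MSK == ((1 << LEN) - 1) << POS. */
#include <stdint.h>
#include <stdio.h>

#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)

int main(void)
{
	uint8_t ctrl = 0;
	unsigned derived = ((1u << L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN) - 1)
			   << L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS;

	ctrl |= L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK; /* ask FW to keep SNAP */

	printf("mask 0x%02x (derived 0x%02x), ctrl 0x%02x\n",
	       L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK, derived, ctrl);
	return 0;
}
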
index 1d7982afc0ad6af6b1b32f92ae07fb9ef694f2e1..6837064908bed38f05c4d98642c50c82d9cf984d 100644 (file)
@@ -553,7 +553,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                  size_t buffersize, bool dma_to_device)
 {
        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
-               return 1;
+               return true;
 
        switch (ring->type) {
        case B43_DMA_30BIT:
@@ -571,13 +571,13 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
        }
 
        /* The address is OK. */
-       return 0;
+       return false;
 
 address_error:
        /* We can't support this address. Unmap it again. */
        unmap_descbuffer(ring, addr, buffersize, dma_to_device);
 
-       return 1;
+       return true;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -1099,16 +1099,16 @@ static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
                                            enum b43_dmatype type)
 {
        if (type != B43_DMA_64BIT)
-               return 1;
+               return true;
 
 #ifdef CONFIG_B43_SSB
        if (dev->dev->bus_type == B43_BUS_SSB &&
            dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
            !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
              ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
-                       return 1;
+                       return true;
 #endif
-       return 0;
+       return false;
 }
 
 int b43_dma_init(struct b43_wldev *dev)
index ea4843be773cba1c68efab2ff99ffcf4214edc4d..b2f9521fe551a3e32bc85db602bd41a94e2b81ca 100644 (file)
@@ -4866,7 +4866,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
        case B43_BUS_BCMA:
-               bcma_core_pci_irq_ctl(dev->dev->bdev->bus,
+               bcma_host_pci_irq_ctl(dev->dev->bdev->bus,
                                      dev->dev->bdev, true);
                bcma_host_pci_up(dev->dev->bdev->bus);
                break;
index b2ed1795130bb0d7e1f10f95ab9775743b5f276b..f9dd892b9f27e9978ea35e8514f457d037b2d9b1 100644 (file)
@@ -427,7 +427,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
                                         bool dma_to_device)
 {
        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
-               return 1;
+               return true;
 
        switch (ring->type) {
        case B43legacy_DMA_30BIT:
@@ -441,13 +441,13 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
        }
 
        /* The address is OK. */
-       return 0;
+       return false;
 
 address_error:
        /* We can't support this address. Unmap it again. */
        unmap_descbuffer(ring, addr, buffersize, dma_to_device);
 
-       return 1;
+       return true;
 }
 
 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
index c4559bcbc707aa00ff4f047876683451c8382619..7c1bdbc0256912568afd455cec0e429006ab8318 100644 (file)
@@ -32,7 +32,7 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
        if (dev->dev->id.revision >= 3) {
                if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
                      & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
-                       return 1;
+                       return true;
        } else {
                /* To prevent CPU fault on PPC, do not read a register
                 * unless the interface is started; however, on resume
@@ -40,12 +40,12 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
                 * that happens, unconditionally return TRUE.
                 */
                if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
-                       return 1;
+                       return true;
                if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO)
                    & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK)
-                       return 1;
+                       return true;
        }
-       return 0;
+       return false;
 }
 
 /* The poll callback for the hardware button. */
index c438ccdb6ed8215ef0c1470adde8522c1e355944..9b508bd3b839256e7595ed90c6ad2eb1bb4709f7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mmc/host.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/pm_runtime.h>
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/module.h>
@@ -1006,6 +1007,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
        sg_free_table(&sdiodev->sgtable);
        sdiodev->sbwad = 0;
 
+       pm_runtime_allow(sdiodev->func[1]->card->host->parent);
        return 0;
 }
 
@@ -1074,7 +1076,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                ret = -ENODEV;
                goto out;
        }
-
+       pm_runtime_forbid(host->parent);
 out:
        if (ret)
                brcmf_sdiod_remove(sdiodev);
@@ -1096,6 +1098,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+       BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
+       BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
        { /* end: all zeroes */ }
 };
@@ -1194,7 +1198,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
        brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
        brcmf_dbg(SDIO, "Function: %d\n", func->num);
 
-       if (func->num != 1 && func->num != 2)
+       if (func->num != 1)
                return;
 
        bus_if = dev_get_drvdata(&func->dev);
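
The bcmsdh changes pin the SDIO host controller's parent device active for as long as the WLAN function is bound: pm_runtime_forbid() at the end of a successful probe, matched by pm_runtime_allow() in remove. A tiny model of that pairing; the counter stands in for the runtime-PM core and the probe/remove bodies are placeholders.

/* Forbid runtime suspend while bound, allow it again on remove. */
#include <stdio.h>

static int forbid_depth; /* >0 means runtime suspend is blocked */

static void pm_forbid(void) { forbid_depth++; }
static void pm_allow(void)  { if (forbid_depth > 0) forbid_depth--; }

static int sdio_probe(void)
{
	/* ... bring the card up ... */
	pm_forbid(); /* keep the host powered from now on */
	return 0;
}

static void sdio_remove(void)
{
	/* ... tear the card down ... */
	pm_allow(); /* host may runtime-suspend again */
}

int main(void)
{
	sdio_probe();
	printf("bound, suspend blocked: %d\n", forbid_depth);
	sdio_remove();
	printf("removed, suspend blocked: %d\n", forbid_depth);
	return 0;
}
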
index 04d2ca0d87d60b11cd0a2ca0baac23867e700ed6..ab2fac8b2760a89269ffcb549090087fe9a16f71 100644 (file)
 #define BCM4329_CORE_SOCRAM_BASE       0x18003000
 /* ARM Cortex M3 core, ID 0x82a */
 #define BCM4329_CORE_ARM_BASE          0x18002000
-#define BCM4329_RAMSIZE                        0x48000
-/* bcm43143 */
-#define BCM43143_RAMSIZE               0x70000
 
 #define CORE_SB(base, field) \
                (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
@@ -150,6 +147,78 @@ struct sbconfig {
        u32 sbidhigh;   /* identification */
 };
 
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_RETNTRAM_MASK  0x00010000
+#define SOCRAM_BANKINFO_SZMASK         0x0000007f
+#define SOCRAM_BANKIDX_ROM_MASK                0x00000100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT   8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM             0
+#define SOCRAM_MEMTYPE_R0M             1
+#define SOCRAM_MEMTYPE_DEVRAM          2
+
+#define SOCRAM_BANKINFO_SZBASE         8192
+#define SRCI_LSS_MASK          0x00f00000
+#define SRCI_LSS_SHIFT         20
+#define        SRCI_SRNB_MASK          0xf0
+#define        SRCI_SRNB_SHIFT         4
+#define        SRCI_SRBSZ_MASK         0xf
+#define        SRCI_SRBSZ_SHIFT        0
+#define SR_BSZ_BASE            14
+
+struct sbsocramregs {
+       u32 coreinfo;
+       u32 bwalloc;
+       u32 extracoreinfo;
+       u32 biststat;
+       u32 bankidx;
+       u32 standbyctrl;
+
+       u32 errlogstatus;       /* rev 6 */
+       u32 errlogaddr; /* rev 6 */
+       /* used for patching rev 3 & 5 */
+       u32 cambankidx;
+       u32 cambankstandbyctrl;
+       u32 cambankpatchctrl;
+       u32 cambankpatchtblbaseaddr;
+       u32 cambankcmdreg;
+       u32 cambankdatareg;
+       u32 cambankmaskreg;
+       u32 PAD[1];
+       u32 bankinfo;   /* corev 8 */
+       u32 bankpda;
+       u32 PAD[14];
+       u32 extmemconfig;
+       u32 extmemparitycsr;
+       u32 extmemparityerrdata;
+       u32 extmemparityerrcnt;
+       u32 extmemwrctrlandsize;
+       u32 PAD[84];
+       u32 workaround;
+       u32 pwrctl;             /* corerev >= 2 */
+       u32 PAD[133];
+       u32 sr_control;     /* corerev >= 15 */
+       u32 sr_status;      /* corerev >= 15 */
+       u32 sr_address;     /* corerev >= 15 */
+       u32 sr_data;        /* corerev >= 15 */
+};
+
+#define SOCRAMREGOFFS(_f)      offsetof(struct sbsocramregs, _f)
+
+#define ARMCR4_CAP             (0x04)
+#define ARMCR4_BANKIDX         (0x40)
+#define ARMCR4_BANKINFO                (0x44)
+#define ARMCR4_BANKPDA         (0x4C)
+
+#define        ARMCR4_TCBBNB_MASK      0xf0
+#define        ARMCR4_TCBBNB_SHIFT     4
+#define        ARMCR4_TCBANB_MASK      0xf
+#define        ARMCR4_TCBANB_SHIFT     0
+
+#define        ARMCR4_BSZ_MASK         0x3f
+#define        ARMCR4_BSZ_MULT         8192
+
 struct brcmf_core_priv {
        struct brcmf_core pub;
        u32 wrapbase;
@@ -419,13 +488,13 @@ static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
        return &core->pub;
 }
 
-#ifdef DEBUG
 /* safety check for chipinfo */
 static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
 {
        struct brcmf_core_priv *core;
        bool need_socram = false;
        bool has_socram = false;
+       bool cpu_found = false;
        int idx = 1;
 
        list_for_each_entry(core, &ci->cores, list) {
@@ -435,22 +504,24 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
 
                switch (core->pub.id) {
                case BCMA_CORE_ARM_CM3:
+                       cpu_found = true;
                        need_socram = true;
                        break;
                case BCMA_CORE_INTERNAL_MEM:
                        has_socram = true;
                        break;
                case BCMA_CORE_ARM_CR4:
-                       if (ci->pub.rambase == 0) {
-                               brcmf_err("RAM base not provided with ARM CR4 core\n");
-                               return -ENOMEM;
-                       }
+                       cpu_found = true;
                        break;
                default:
                        break;
                }
        }
 
+       if (!cpu_found) {
+               brcmf_err("CPU core not detected\n");
+               return -ENXIO;
+       }
        /* check RAM core presence for ARM CM3 core */
        if (need_socram && !has_socram) {
                brcmf_err("RAM core not provided with ARM CM3 core\n");
@@ -458,56 +529,164 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
        }
        return 0;
 }
-#else  /* DEBUG */
-static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+
+static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
 {
-       return 0;
+       return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
 }
-#endif
 
-static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
+                                   u16 reg, u32 val)
 {
-       switch (ci->pub.chip) {
-       case BRCM_CC_4329_CHIP_ID:
-               ci->pub.ramsize = BCM4329_RAMSIZE;
-               break;
-       case BRCM_CC_43143_CHIP_ID:
-               ci->pub.ramsize = BCM43143_RAMSIZE;
-               break;
-       case BRCM_CC_43241_CHIP_ID:
-               ci->pub.ramsize = 0x90000;
-               break;
-       case BRCM_CC_4330_CHIP_ID:
-               ci->pub.ramsize = 0x48000;
-               break;
+       core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
+}
+
+static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
+                                      u32 *banksize)
+{
+       u32 bankinfo;
+       u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+       bankidx |= idx;
+       brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
+       bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
+       *banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
+       *banksize *= SOCRAM_BANKINFO_SZBASE;
+       return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
+}
+
+static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
+                                     u32 *srsize)
+{
+       u32 coreinfo;
+       uint nb, banksize, lss;
+       bool retent;
+       int i;
+
+       *ramsize = 0;
+       *srsize = 0;
+
+       if (WARN_ON(sr->pub.rev < 4))
+               return;
+
+       if (!brcmf_chip_iscoreup(&sr->pub))
+               brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
+
+       /* Get info for determining size */
+       coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
+       nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+
+       if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
+               banksize = (coreinfo & SRCI_SRBSZ_MASK);
+               lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+               if (lss != 0)
+                       nb--;
+               *ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
+               if (lss != 0)
+                       *ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+       } else {
+               nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+               for (i = 0; i < nb; i++) {
+                       retent = brcmf_chip_socram_banksize(sr, i, &banksize);
+                       *ramsize += banksize;
+                       if (retent)
+                               *srsize += banksize;
+               }
+       }
+
+       /* hardcoded save&restore memory sizes */
+       switch (sr->chip->pub.chip) {
        case BRCM_CC_4334_CHIP_ID:
-       case BRCM_CC_43340_CHIP_ID:
-               ci->pub.ramsize = 0x80000;
+               if (sr->chip->pub.chiprev < 2)
+                       *srsize = (32 * 1024);
                break;
-       case BRCM_CC_4335_CHIP_ID:
-               ci->pub.ramsize = 0xc0000;
-               ci->pub.rambase = 0x180000;
+       case BRCM_CC_43430_CHIP_ID:
+               /* assume sr for now as we can not check
+                * firmware sr capability at this point.
+                */
+               *srsize = (64 * 1024);
                break;
-       case BRCM_CC_43362_CHIP_ID:
-               ci->pub.ramsize = 0x3c000;
+       default:
                break;
+       }
+}
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
+{
+       u32 corecap;
+       u32 memsize = 0;
+       u32 nab;
+       u32 nbb;
+       u32 totb;
+       u32 bxinfo;
+       u32 idx;
+
+       corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
+
+       nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+       nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+       totb = nab + nbb;
+
+       for (idx = 0; idx < totb; idx++) {
+               brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
+               bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
+               memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+       }
+
+       return memsize;
+}
+
+static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
+{
+       switch (ci->pub.chip) {
+       case BRCM_CC_4345_CHIP_ID:
+               return 0x198000;
+       case BRCM_CC_4335_CHIP_ID:
        case BRCM_CC_4339_CHIP_ID:
        case BRCM_CC_4354_CHIP_ID:
        case BRCM_CC_4356_CHIP_ID:
        case BRCM_CC_43567_CHIP_ID:
        case BRCM_CC_43569_CHIP_ID:
        case BRCM_CC_43570_CHIP_ID:
-               ci->pub.ramsize = 0xc0000;
-               ci->pub.rambase = 0x180000;
-               break;
        case BRCM_CC_43602_CHIP_ID:
-               ci->pub.ramsize = 0xf0000;
-               ci->pub.rambase = 0x180000;
-               break;
+               return 0x180000;
        default:
                brcmf_err("unknown chip: %s\n", ci->pub.name);
                break;
        }
+       return 0;
+}
+
+static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+       struct brcmf_core_priv *mem_core;
+       struct brcmf_core *mem;
+
+       mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
+       if (mem) {
+               mem_core = container_of(mem, struct brcmf_core_priv, pub);
+               ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
+               ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
+               if (!ci->pub.rambase) {
+                       brcmf_err("RAM base not provided with ARM CR4 core\n");
+                       return -EINVAL;
+               }
+       } else {
+               mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM);
+               mem_core = container_of(mem, struct brcmf_core_priv, pub);
+               brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
+                                         &ci->pub.srsize);
+       }
+       brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
+                 ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
+                 ci->pub.srsize, ci->pub.srsize);
+
+       if (!ci->pub.ramsize) {
+               brcmf_err("RAM size is undetermined\n");
+               return -ENOMEM;
+       }
+       return 0;
 }
 
 static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
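Note on the TCM sizing above: brcmf_chip_tcm_ramsize() simply walks every bank of the CR4 tightly-coupled memory and adds up the per-bank sizes reported by ARMCR4_BANKINFO. A standalone sketch of that arithmetic follows; the mask and multiplier values and the bank-info words are illustrative assumptions, not taken from the register definitions.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for ARMCR4_BSZ_MASK / ARMCR4_BSZ_MULT */
#define BSZ_MASK  0x3f
#define BSZ_MULT  8192

int main(void)
{
        /* pretend ARMCR4_CAP reported 3 A-banks and 2 B-banks, and these
         * are the BANKINFO words read back for banks 0..4 */
        uint32_t bankinfo[] = { 0x07, 0x07, 0x07, 0x03, 0x03 };
        uint32_t memsize = 0;
        unsigned int idx;

        for (idx = 0; idx < sizeof(bankinfo) / sizeof(bankinfo[0]); idx++)
                memsize += ((bankinfo[idx] & BSZ_MASK) + 1) * BSZ_MULT;

        printf("TCM RAM: %u bytes\n", memsize);  /* 3*64KiB + 2*32KiB */
        return 0;
}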
@@ -660,6 +839,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
        struct brcmf_core *core;
        u32 regdata;
        u32 socitype;
+       int ret;
 
        /* Get CC core rev
         * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
@@ -712,9 +892,13 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
                return -ENODEV;
        }
 
-       brcmf_chip_get_raminfo(ci);
+       ret = brcmf_chip_cores_check(ci);
+       if (ret)
+               return ret;
 
-       return brcmf_chip_cores_check(ci);
+       /* ensure chip is passive for core access */
+       brcmf_chip_set_passive(&ci->pub);
+       return brcmf_chip_get_raminfo(ci);
 }
 
 static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
@@ -778,12 +962,6 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
        if (chip->ops->setup)
                ret = chip->ops->setup(chip->ctx, pub);
 
-       /*
-        * Make sure any on-chip ARM is off (in case strapping is wrong),
-        * or downloaded code was already running.
-        */
-       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
-       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
        return ret;
 }
 
@@ -799,7 +977,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
                err = -EINVAL;
        if (WARN_ON(!ops->prepare))
                err = -EINVAL;
-       if (WARN_ON(!ops->exit_dl))
+       if (WARN_ON(!ops->activate))
                err = -EINVAL;
        if (err < 0)
                return ERR_PTR(-EINVAL);
@@ -897,9 +1075,10 @@ void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
 }
 
 static void
-brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
 {
        struct brcmf_core *core;
+       struct brcmf_core_priv *sr;
 
        brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
@@ -909,9 +1088,16 @@ brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
                             D11_BCMA_IOCTL_PHYCLOCKEN);
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
        brcmf_chip_resetcore(core, 0, 0, 0);
+
+       /* disable bank #3 remap for this device */
+       if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+               sr = container_of(core, struct brcmf_core_priv, pub);
+               brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
+               brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
+       }
 }
 
-static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
 {
        struct brcmf_core *core;
 
@@ -921,7 +1107,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
                return false;
        }
 
-       chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+       chip->ops->activate(chip->ctx, &chip->pub, 0);
 
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
        brcmf_chip_resetcore(core, 0, 0, 0);
@@ -930,7 +1116,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
 }
 
 static inline void
-brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
 {
        struct brcmf_core *core;
 
@@ -943,11 +1129,11 @@ brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
                             D11_BCMA_IOCTL_PHYCLOCKEN);
 }
 
-static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
 {
        struct brcmf_core *core;
 
-       chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+       chip->ops->activate(chip->ctx, &chip->pub, rstvec);
 
        /* restore ARM */
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
@@ -956,7 +1142,7 @@ static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
        return true;
 }
 
-void brcmf_chip_enter_download(struct brcmf_chip *pub)
+void brcmf_chip_set_passive(struct brcmf_chip *pub)
 {
        struct brcmf_chip_priv *chip;
        struct brcmf_core *arm;
@@ -966,14 +1152,14 @@ void brcmf_chip_enter_download(struct brcmf_chip *pub)
        chip = container_of(pub, struct brcmf_chip_priv, pub);
        arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
        if (arm) {
-               brcmf_chip_cr4_enterdl(chip);
+               brcmf_chip_cr4_set_passive(chip);
                return;
        }
 
-       brcmf_chip_cm3_enterdl(chip);
+       brcmf_chip_cm3_set_passive(chip);
 }
 
-bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
 {
        struct brcmf_chip_priv *chip;
        struct brcmf_core *arm;
@@ -983,9 +1169,9 @@ bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
        chip = container_of(pub, struct brcmf_chip_priv, pub);
        arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
        if (arm)
-               return brcmf_chip_cr4_exitdl(chip, rstvec);
+               return brcmf_chip_cr4_set_active(chip, rstvec);
 
-       return brcmf_chip_cm3_exitdl(chip);
+       return brcmf_chip_cm3_set_active(chip);
 }
 
 bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
@@ -1016,6 +1202,10 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
                addr = CORE_CC_REG(base, chipcontrol_data);
                reg = chip->ops->read32(chip->ctx, addr);
                return (reg & pmu_cc3_mask) != 0;
+       case BRCM_CC_43430_CHIP_ID:
+               addr = CORE_CC_REG(base, sr_control1);
+               reg = chip->ops->read32(chip->ctx, addr);
+               return reg != 0;
        default:
                addr = CORE_CC_REG(base, pmucapabilities_ext);
                reg = chip->ops->read32(chip->ctx, addr);
index c32908da90c853e7a133bc0e24a03b35fe2045b3..60dcb38fc77a3f59e1c2efcaefd3d337470d3282 100644 (file)
@@ -30,7 +30,8 @@
  * @pmucaps: PMU capabilities.
  * @pmurev: PMU revision.
  * @rambase: RAM base address (only applicable for ARM CR4 chips).
- * @ramsize: amount of RAM on chip.
+ * @ramsize: amount of RAM on chip including retention.
+ * @srsize: amount of retention RAM on chip.
  * @name: string representation of the chip identifier.
  */
 struct brcmf_chip {
@@ -41,6 +42,7 @@ struct brcmf_chip {
        u32 pmurev;
        u32 rambase;
        u32 ramsize;
+       u32 srsize;
        char name[8];
 };
 
@@ -64,7 +66,7 @@ struct brcmf_core {
  * @write32: write 32-bit value over bus.
  * @prepare: prepare bus for core configuration.
  * @setup: bus-specific core setup.
- * @exit_dl: exit download state.
+ * @activate: chip becomes active.
  *     The callback should use the provided @rstvec when non-zero.
  */
 struct brcmf_buscore_ops {
@@ -72,7 +74,7 @@ struct brcmf_buscore_ops {
        void (*write32)(void *ctx, u32 addr, u32 value);
        int (*prepare)(void *ctx);
        int (*setup)(void *ctx, struct brcmf_chip *chip);
-       void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+       void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
 };
 
 struct brcmf_chip *brcmf_chip_attach(void *ctx,
@@ -84,8 +86,8 @@ bool brcmf_chip_iscoreup(struct brcmf_core *core);
 void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
 void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
                          u32 postreset);
-void brcmf_chip_enter_download(struct brcmf_chip *ci);
-bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+void brcmf_chip_set_passive(struct brcmf_chip *ci);
+bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
 bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
 
 #endif /* BRCMF_AXIDMP_H */
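With the callback renamed, a bus backend now fills in .activate instead of .exit_dl. A hypothetical backend would look like the sketch below (all mybus_* names are invented; the real PCIe and SDIO versions follow later in this diff):

/* hypothetical bus glue, shown only to illustrate the renamed hook */
static void mybus_buscore_activate(void *ctx, struct brcmf_chip *chip,
                                   u32 rstvec)
{
        /* hand the firmware reset vector to the device so the ARM core
         * starts executing the freshly downloaded image */
}

static const struct brcmf_buscore_ops mybus_buscore_ops = {
        .prepare  = mybus_buscoreprep,       /* assumed helper */
        .activate = mybus_buscore_activate,  /* formerly .exit_dl */
        .read32   = mybus_read32,            /* assumed helper */
        .write32  = mybus_write32,           /* assumed helper */
};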
index defb7a44e0bc1ff554195890dcccc31bfb0c9769..7748a1ccf14fdf4a6b2864441e9b041da35558a6 100644 (file)
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
        brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
        if (drvr->bus_if->wowl_supported)
                brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
-       brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+       if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+               brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
 
        /* set chip related quirks */
        switch (drvr->bus_if->chip) {
index 6262612dec450b649c3ccc1eb1b3e12205335fd3..4ec9811f49c87744458ed16cdcec32422432dc3f 100644 (file)
@@ -481,10 +481,9 @@ static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
 
 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
 {
-       if (waitqueue_active(&msgbuf->ioctl_resp_wait)) {
-               msgbuf->ctl_completed = true;
+       msgbuf->ctl_completed = true;
+       if (waitqueue_active(&msgbuf->ioctl_resp_wait))
                wake_up(&msgbuf->ioctl_resp_wait);
-       }
 }
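The reordering above avoids a lost completion: the waiter may not yet be on the wait queue when waitqueue_active() is sampled, so the flag has to be set unconditionally before the wake-up attempt and the waiter then re-checks it. A userspace analogue of the safe ordering, using a condition variable instead of a kernel waitqueue (names invented):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool ctl_completed;

/* waker: publish the state change before (and regardless of) the signal */
void resp_wake(void)
{
        pthread_mutex_lock(&lock);
        ctl_completed = true;           /* always set, as in the fix above */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

/* waiter: re-checks the flag, so a signal sent "too early" is never lost */
void resp_wait(void)
{
        pthread_mutex_lock(&lock);
        while (!ctl_completed)
                pthread_cond_wait(&cond, &lock);
        ctl_completed = false;
        pthread_mutex_unlock(&lock);
}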
 
 
index 77a51b8c1e120824a4eb6d0b336d3875d3c16fe5..3d513e407e3d5db610792a6bea0d1d843cd15a63 100644 (file)
 
 #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
-#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      20
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       256
-#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM    20
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      64
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       512
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM    64
 #define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM         1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM         256
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM         512
 #define BRCMF_H2D_TXFLOWRING_MAX_ITEM                  512
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE      40
index 61c053a729be01b752101dab3db55cf1e6728e9b..1831ecd0813e955dfc88c3c6eb8783f36060465b 100644 (file)
@@ -47,8 +47,6 @@ enum brcmf_pcie_state {
 
 #define BRCMF_PCIE_43602_FW_NAME               "brcm/brcmfmac43602-pcie.bin"
 #define BRCMF_PCIE_43602_NVRAM_NAME            "brcm/brcmfmac43602-pcie.txt"
-#define BRCMF_PCIE_4354_FW_NAME                        "brcm/brcmfmac4354-pcie.bin"
-#define BRCMF_PCIE_4354_NVRAM_NAME             "brcm/brcmfmac4354-pcie.txt"
 #define BRCMF_PCIE_4356_FW_NAME                        "brcm/brcmfmac4356-pcie.bin"
 #define BRCMF_PCIE_4356_NVRAM_NAME             "brcm/brcmfmac4356-pcie.txt"
 #define BRCMF_PCIE_43570_FW_NAME               "brcm/brcmfmac43570-pcie.bin"
@@ -187,8 +185,8 @@ enum brcmf_pcie_state {
 
 MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
-MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
-MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
 
@@ -509,8 +507,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
 
 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
 {
-       brcmf_chip_enter_download(devinfo->ci);
-
        if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
                brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
                brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
@@ -536,7 +532,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
                brcmf_chip_resetcore(core, 0, 0, 0);
        }
 
-       return !brcmf_chip_exit_download(devinfo->ci, resetintr);
+       return !brcmf_chip_set_active(devinfo->ci, resetintr);
 }
 
 
@@ -653,10 +649,9 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
                        console->log_str[console->log_idx] = ch;
                        console->log_idx++;
                }
-
                if (ch == '\n') {
                        console->log_str[console->log_idx] = 0;
-                       brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
+                       brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
                        console->log_idx = 0;
                }
        }
@@ -1328,10 +1323,6 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
                fw_name = BRCMF_PCIE_43602_FW_NAME;
                nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
                break;
-       case BRCM_CC_4354_CHIP_ID:
-               fw_name = BRCMF_PCIE_4354_FW_NAME;
-               nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
-               break;
        case BRCM_CC_4356_CHIP_ID:
                fw_name = BRCMF_PCIE_4356_FW_NAME;
                nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
@@ -1566,8 +1557,8 @@ static int brcmf_pcie_buscoreprep(void *ctx)
 }
 
 
-static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
-                                     u32 rstvec)
+static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
+                                       u32 rstvec)
 {
        struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
 
@@ -1577,7 +1568,7 @@ static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
 
 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
        .prepare = brcmf_pcie_buscoreprep,
-       .exit_dl = brcmf_pcie_buscore_exitdl,
+       .activate = brcmf_pcie_buscore_activate,
        .read32 = brcmf_pcie_buscore_read32,
        .write32 = brcmf_pcie_buscore_write32,
 };
@@ -1856,7 +1847,6 @@ cleanup:
        PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
 
 static struct pci_device_id brcmf_pcie_devid_table[] = {
-       BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
index 257ee70feb5b143d8e9b5acbf59115f917bc072f..ab0c898330137e494a79c7418c151f9dec860f98 100644 (file)
@@ -432,8 +432,6 @@ struct brcmf_sdio {
        struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
        struct brcmf_chip *ci;  /* Chip info struct */
 
-       u32 ramsize;            /* Size of RAM in SOCRAM (bytes) */
-
        u32 hostintmask;        /* Copy of Host Interrupt Mask */
        atomic_t intstatus;     /* Intstatus bits (events) pending */
        atomic_t fcstate;       /* State of dongle flow-control */
@@ -485,10 +483,9 @@ struct brcmf_sdio {
 #endif                         /* DEBUG */
 
        uint clkstate;          /* State of sd and backplane clock(s) */
-       bool activity;          /* Activity flag for clock down */
        s32 idletime;           /* Control for activity timeout */
-       s32 idlecount;  /* Activity timeout counter */
-       s32 idleclock;  /* How to set bus driver when idle */
+       s32 idlecount;          /* Activity timeout counter */
+       s32 idleclock;          /* How to set bus driver when idle */
        bool rxflow_mode;       /* Rx flow control mode */
        bool rxflow;            /* Is rx flow control on */
        bool alp_only;          /* Don't use HT clock (ALP only) */
@@ -510,7 +507,8 @@ struct brcmf_sdio {
 
        struct workqueue_struct *brcmf_wq;
        struct work_struct datawork;
-       atomic_t dpc_tskcnt;
+       bool dpc_triggered;
+       bool dpc_running;
 
        bool txoff;             /* Transmit flow-controlled */
        struct brcmf_sdio_count sdcnt;
@@ -617,6 +615,10 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
 #define BCM43362_NVRAM_NAME            "brcm/brcmfmac43362-sdio.txt"
 #define BCM4339_FIRMWARE_NAME          "brcm/brcmfmac4339-sdio.bin"
 #define BCM4339_NVRAM_NAME             "brcm/brcmfmac4339-sdio.txt"
+#define BCM43430_FIRMWARE_NAME         "brcm/brcmfmac43430-sdio.bin"
+#define BCM43430_NVRAM_NAME            "brcm/brcmfmac43430-sdio.txt"
+#define BCM43455_FIRMWARE_NAME         "brcm/brcmfmac43455-sdio.bin"
+#define BCM43455_NVRAM_NAME            "brcm/brcmfmac43455-sdio.txt"
 #define BCM4354_FIRMWARE_NAME          "brcm/brcmfmac4354-sdio.bin"
 #define BCM4354_NVRAM_NAME             "brcm/brcmfmac4354-sdio.txt"
 
@@ -640,6 +642,10 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
 
@@ -669,6 +675,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
        { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
        { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
        { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+       { BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
+       { BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
        { BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
 };
 
@@ -959,13 +967,8 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
        brcmf_dbg(SDIO, "Enter\n");
 
        /* Early exit if we're already there */
-       if (bus->clkstate == target) {
-               if (target == CLK_AVAIL) {
-                       brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-                       bus->activity = true;
-               }
+       if (bus->clkstate == target)
                return 0;
-       }
 
        switch (target) {
        case CLK_AVAIL:
@@ -974,8 +977,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
                        brcmf_sdio_sdclk(bus, true);
                /* Now request HT Avail on the backplane */
                brcmf_sdio_htclk(bus, true, pendok);
-               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-               bus->activity = true;
                break;
 
        case CLK_SDONLY:
@@ -987,7 +988,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
                else
                        brcmf_err("request for %d -> %d\n",
                                  bus->clkstate, target);
-               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
                break;
 
        case CLK_NONE:
@@ -996,7 +996,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
                        brcmf_sdio_htclk(bus, false, false);
                /* Now remove the SD clock */
                brcmf_sdio_sdclk(bus, false);
-               brcmf_sdio_wd_timer(bus, 0);
                break;
        }
 #ifdef DEBUG
@@ -1024,17 +1023,6 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 
                /* Going to sleep */
                if (sleep) {
-                       /* Don't sleep if something is pending */
-                       if (atomic_read(&bus->intstatus) ||
-                           atomic_read(&bus->ipend) > 0 ||
-                           bus->ctrl_frame_stat ||
-                           (!atomic_read(&bus->fcstate) &&
-                           brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
-                           data_ok(bus))) {
-                                err = -EBUSY;
-                                goto done;
-                       }
-
                        clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
                                                   SBSDIO_FUNC1_CHIPCLKCSR,
                                                   &err);
@@ -1045,11 +1033,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
                                                  SBSDIO_ALP_AVAIL_REQ, &err);
                        }
                        err = brcmf_sdio_kso_control(bus, false);
-                       /* disable watchdog */
-                       if (!err)
-                               brcmf_sdio_wd_timer(bus, 0);
                } else {
-                       bus->idlecount = 0;
                        err = brcmf_sdio_kso_control(bus, true);
                }
                if (err) {
@@ -1066,6 +1050,7 @@ end:
                        brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
        } else {
                brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
+               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
        }
        bus->sleeping = sleep;
        brcmf_dbg(SDIO, "new state %s\n",
@@ -1085,44 +1070,47 @@ static inline bool brcmf_sdio_valid_shared_address(u32 addr)
 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
                                 struct sdpcm_shared *sh)
 {
-       u32 addr;
+       u32 addr = 0;
        int rv;
        u32 shaddr = 0;
        struct sdpcm_shared_le sh_le;
        __le32 addr_le;
 
-       shaddr = bus->ci->rambase + bus->ramsize - 4;
+       sdio_claim_host(bus->sdiodev->func[1]);
+       brcmf_sdio_bus_sleep(bus, false, false);
 
        /*
         * Read last word in socram to determine
         * address of sdpcm_shared structure
         */
-       sdio_claim_host(bus->sdiodev->func[1]);
-       brcmf_sdio_bus_sleep(bus, false, false);
-       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
-       sdio_release_host(bus->sdiodev->func[1]);
+       shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
+       if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
+               shaddr -= bus->ci->srsize;
+       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
+                              (u8 *)&addr_le, 4);
        if (rv < 0)
-               return rv;
-
-       addr = le32_to_cpu(addr_le);
-
-       brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+               goto fail;
 
        /*
         * Check if addr is valid.
         * NVRAM length at the end of memory should have been overwritten.
         */
+       addr = le32_to_cpu(addr_le);
        if (!brcmf_sdio_valid_shared_address(addr)) {
-                       brcmf_err("invalid sdpcm_shared address 0x%08X\n",
-                                 addr);
-                       return -EINVAL;
+               brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
+               rv = -EINVAL;
+               goto fail;
        }
 
+       brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
+
        /* Read hndrte_shared structure */
        rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
                               sizeof(struct sdpcm_shared_le));
        if (rv < 0)
-               return rv;
+               goto fail;
+
+       sdio_release_host(bus->sdiodev->func[1]);
 
        /* Endianness */
        sh->flags = le32_to_cpu(sh_le.flags);
@@ -1139,8 +1127,13 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
                          sh->flags & SDPCM_SHARED_VERSION_MASK);
                return -EPROTO;
        }
-
        return 0;
+
+fail:
+       brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
+                 rv, addr);
+       sdio_release_host(bus->sdiodev->func[1]);
+       return rv;
 }
 
 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
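The shared-structure lookup now derives everything from the chip info: the pointer lives in the last word of RAM, and on SR-capable SOCRAM chips the retention area at the top is skipped. With purely hypothetical numbers the arithmetic looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical chip info, not taken from any real device */
        uint32_t rambase = 0x0;
        uint32_t ramsize = 0x80000;     /* 512 KiB, includes retention */
        uint32_t srsize  = 0x10000;     /* 64 KiB retention (save/restore) */
        int sr_capable   = 1;

        uint32_t shaddr = rambase + ramsize - 4;
        if (!rambase && sr_capable)
                shaddr -= srsize;       /* last word below the SR area */

        printf("sdpcm_shared pointer read from 0x%08x\n", shaddr);
        /* -> 0x0006fffc */
        return 0;
}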
@@ -2721,11 +2714,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
        if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
            data_ok(bus)) {
                sdio_claim_host(bus->sdiodev->func[1]);
-               err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
-                                             bus->ctrl_frame_len);
+               if (bus->ctrl_frame_stat) {
+                       err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
+                                                     bus->ctrl_frame_len);
+                       bus->ctrl_frame_err = err;
+                       wmb();
+                       bus->ctrl_frame_stat = false;
+               }
                sdio_release_host(bus->sdiodev->func[1]);
-               bus->ctrl_frame_err = err;
-               bus->ctrl_frame_stat = false;
                brcmf_sdio_wait_event_wakeup(bus);
        }
        /* Send queued frames (limit 1 if rx may still be pending) */
@@ -2740,12 +2736,22 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
        if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
                brcmf_err("failed backplane access over SDIO, halting operation\n");
                atomic_set(&bus->intstatus, 0);
+               if (bus->ctrl_frame_stat) {
+                       sdio_claim_host(bus->sdiodev->func[1]);
+                       if (bus->ctrl_frame_stat) {
+                               bus->ctrl_frame_err = -ENODEV;
+                               wmb();
+                               bus->ctrl_frame_stat = false;
+                               brcmf_sdio_wait_event_wakeup(bus);
+                       }
+                       sdio_release_host(bus->sdiodev->func[1]);
+               }
        } else if (atomic_read(&bus->intstatus) ||
                   atomic_read(&bus->ipend) > 0 ||
                   (!atomic_read(&bus->fcstate) &&
                    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
                    data_ok(bus))) {
-               atomic_inc(&bus->dpc_tskcnt);
+               bus->dpc_triggered = true;
        }
 }
 
@@ -2941,20 +2947,27 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        /* Send from dpc */
        bus->ctrl_frame_buf = msg;
        bus->ctrl_frame_len = msglen;
+       wmb();
        bus->ctrl_frame_stat = true;
 
        brcmf_sdio_trigger_dpc(bus);
        wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
                                         msecs_to_jiffies(CTL_DONE_TIMEOUT));
-
-       if (!bus->ctrl_frame_stat) {
+       ret = 0;
+       if (bus->ctrl_frame_stat) {
+               sdio_claim_host(bus->sdiodev->func[1]);
+               if (bus->ctrl_frame_stat) {
+                       brcmf_dbg(SDIO, "ctrl_frame timeout\n");
+                       bus->ctrl_frame_stat = false;
+                       ret = -ETIMEDOUT;
+               }
+               sdio_release_host(bus->sdiodev->func[1]);
+       }
+       if (!ret) {
                brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
                          bus->ctrl_frame_err);
+               rmb();
                ret = bus->ctrl_frame_err;
-       } else {
-               brcmf_dbg(SDIO, "ctrl_frame timeout\n");
-               bus->ctrl_frame_stat = false;
-               ret = -ETIMEDOUT;
        }
 
        if (ret)
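The control-frame hand-off above depends on ordering rather than a lock around the whole exchange: the submitter publishes the buffer before raising ctrl_frame_stat (wmb), the DPC publishes ctrl_frame_err before clearing the flag (wmb), and the submitter only reads the error after seeing the flag cleared (rmb). A userspace model of that pairing with C11 acquire/release atomics, using invented names:

#include <stdatomic.h>
#include <stdbool.h>

struct ctrl_frame {
        const void *buf;
        unsigned int len;
        int err;
        atomic_bool pending;    /* models bus->ctrl_frame_stat */
};

static void submit(struct ctrl_frame *cf, const void *msg, unsigned int len)
{
        cf->buf = msg;
        cf->len = len;
        /* release: buf/len visible before pending flips to true (wmb) */
        atomic_store_explicit(&cf->pending, true, memory_order_release);
}

static bool complete(struct ctrl_frame *cf, int err)
{
        if (!atomic_load_explicit(&cf->pending, memory_order_acquire))
                return false;
        cf->err = err;
        /* release: err visible before pending flips to false (wmb) */
        atomic_store_explicit(&cf->pending, false, memory_order_release);
        return true;
}

static int reap(struct ctrl_frame *cf)
{
        /* acquire: pairs with the release in complete() (rmb) */
        if (atomic_load_explicit(&cf->pending, memory_order_acquire))
                return -1;      /* timed out, still pending */
        return cf->err;
}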
@@ -3358,9 +3371,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
        sdio_claim_host(bus->sdiodev->func[1]);
        brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 
-       /* Keep arm in reset */
-       brcmf_chip_enter_download(bus->ci);
-
        rstvec = get_unaligned_le32(fw->data);
        brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
 
@@ -3380,7 +3390,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
        }
 
        /* Take arm out of reset */
-       if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
+       if (!brcmf_chip_set_active(bus->ci, rstvec)) {
                brcmf_err("error getting out of ARM core reset\n");
                goto err;
        }
@@ -3525,8 +3535,8 @@ done:
 
 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
 {
-       if (atomic_read(&bus->dpc_tskcnt) == 0) {
-               atomic_inc(&bus->dpc_tskcnt);
+       if (!bus->dpc_triggered) {
+               bus->dpc_triggered = true;
                queue_work(bus->brcmf_wq, &bus->datawork);
        }
 }
@@ -3557,11 +3567,11 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
        if (!bus->intr)
                brcmf_err("isr w/o interrupt configured!\n");
 
-       atomic_inc(&bus->dpc_tskcnt);
+       bus->dpc_triggered = true;
        queue_work(bus->brcmf_wq, &bus->datawork);
 }
 
-static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
+static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TIMER, "Enter\n");
 
@@ -3577,7 +3587,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                if (!bus->intr ||
                    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
 
-                       if (atomic_read(&bus->dpc_tskcnt) == 0) {
+                       if (!bus->dpc_triggered) {
                                u8 devpend;
 
                                sdio_claim_host(bus->sdiodev->func[1]);
@@ -3595,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                                bus->sdcnt.pollcnt++;
                                atomic_set(&bus->ipend, 1);
 
-                               atomic_inc(&bus->dpc_tskcnt);
+                               bus->dpc_triggered = true;
                                queue_work(bus->brcmf_wq, &bus->datawork);
                        }
                }
@@ -3622,22 +3632,25 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 #endif                         /* DEBUG */
 
        /* On idle timeout clear activity flag and/or turn off clock */
-       if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
-               if (++bus->idlecount >= bus->idletime) {
-                       bus->idlecount = 0;
-                       if (bus->activity) {
-                               bus->activity = false;
-                               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-                       } else {
+       if (!bus->dpc_triggered) {
+               rmb();
+               if ((!bus->dpc_running) && (bus->idletime > 0) &&
+                   (bus->clkstate == CLK_AVAIL)) {
+                       bus->idlecount++;
+                       if (bus->idlecount > bus->idletime) {
                                brcmf_dbg(SDIO, "idle\n");
                                sdio_claim_host(bus->sdiodev->func[1]);
+                               brcmf_sdio_wd_timer(bus, 0);
+                               bus->idlecount = 0;
                                brcmf_sdio_bus_sleep(bus, true, false);
                                sdio_release_host(bus->sdiodev->func[1]);
                        }
+               } else {
+                       bus->idlecount = 0;
                }
+       } else {
+               bus->idlecount = 0;
        }
-
-       return (atomic_read(&bus->ipend) > 0);
 }
 
 static void brcmf_sdio_dataworker(struct work_struct *work)
@@ -3645,10 +3658,14 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
        struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
                                              datawork);
 
-       while (atomic_read(&bus->dpc_tskcnt)) {
-               atomic_set(&bus->dpc_tskcnt, 0);
+       bus->dpc_running = true;
+       wmb();
+       while (ACCESS_ONCE(bus->dpc_triggered)) {
+               bus->dpc_triggered = false;
                brcmf_sdio_dpc(bus);
+               bus->idlecount = 0;
        }
+       bus->dpc_running = false;
        if (brcmf_sdiod_freezing(bus->sdiodev)) {
                brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
                brcmf_sdiod_try_freeze(bus->sdiodev);
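The dpc_tskcnt counter is replaced by two flags plus barriers: producers set dpc_triggered and queue the work item, the worker raises dpc_running before draining dpc_triggered, and the watchdog only idles the bus once it has seen dpc_triggered clear and, after the paired barrier, dpc_running clear as well. A compact C11 model of that handshake (not driver code, names invented):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool triggered;   /* models bus->dpc_triggered */
static atomic_bool running;     /* models bus->dpc_running   */

void trigger(void)              /* ISR / txctl side */
{
        if (!atomic_load(&triggered)) {
                atomic_store(&triggered, true);
                /* queue_work(...) would go here */
        }
}

void worker(void)               /* dataworker side */
{
        atomic_store(&running, true);           /* pairs with the wmb() */
        while (atomic_exchange(&triggered, false))
                ;                               /* brcmf_sdio_dpc() body */
        atomic_store(&running, false);
}

bool may_idle(void)             /* watchdog side */
{
        if (atomic_load(&triggered))
                return false;
        /* pairs with the rmb(): only trust 'running' after no trigger seen */
        return !atomic_load_explicit(&running, memory_order_acquire);
}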
@@ -3771,8 +3788,8 @@ static int brcmf_sdio_buscoreprep(void *ctx)
        return 0;
 }
 
-static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
-                                     u32 rstvec)
+static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
+                                       u32 rstvec)
 {
        struct brcmf_sdio_dev *sdiodev = ctx;
        struct brcmf_core *core;
@@ -3815,7 +3832,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
 
 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
        .prepare = brcmf_sdio_buscoreprep,
-       .exit_dl = brcmf_sdio_buscore_exitdl,
+       .activate = brcmf_sdio_buscore_activate,
        .read32 = brcmf_sdio_buscore_read32,
        .write32 = brcmf_sdio_buscore_write32,
 };
@@ -3869,13 +3886,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
        brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
 
-       /* Get info on the SOCRAM cores... */
-       bus->ramsize = bus->ci->ramsize;
-       if (!(bus->ramsize)) {
-               brcmf_err("failed to find SOCRAM memory!\n");
-               goto fail;
-       }
-
        /* Set card control so an SDIO card reset does a WLAN backplane reset */
        reg_val = brcmf_sdiod_regrb(bus->sdiodev,
                                    SDIO_CCCR_BRCM_CARDCTRL, &err);
@@ -4148,7 +4158,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
                bus->watchdog_tsk = NULL;
        }
        /* Initialize DPC thread */
-       atomic_set(&bus->dpc_tskcnt, 0);
+       bus->dpc_triggered = false;
+       bus->dpc_running = false;
 
        /* Assign bus interface call back */
        bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
@@ -4243,14 +4254,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
                if (bus->ci) {
                        if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
                                sdio_claim_host(bus->sdiodev->func[1]);
+                               brcmf_sdio_wd_timer(bus, 0);
                                brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
                                /* Leave the device in state where it is
-                                * 'quiet'. This is done by putting it in
-                                * download_state which essentially resets
-                                * all necessary cores.
+                                * 'passive'. This is done by resetting all
+                                * necessary cores.
                                 */
                                msleep(20);
-                               brcmf_chip_enter_download(bus->ci);
+                               brcmf_chip_set_passive(bus->ci);
                                brcmf_sdio_clkctl(bus, CLK_NONE, false);
                                sdio_release_host(bus->sdiodev->func[1]);
                        }
index c84af1dfc88fdde21df4c384a82e849ba827464a..369527e2768956ee30563d8d330341c802f7bc6d 100644 (file)
@@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
         * Configure pci/pcmcia here instead of in brcms_c_attach()
         * to allow mfg hotswap:  down, hotswap (chip power cycle), up.
         */
-       bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
+       bcma_host_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
                              true);
 
        /*
index 941b1e41f3664cc0ea7f5708a337e9edceb7271b..1c4e9dd57960f133ba20661eef5c47ea24336fad 100644 (file)
@@ -2949,5 +2949,5 @@ bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *ppi)
        if (ISNPHY(pi))
                return wlc_phy_n_txpower_ipa_ison(pi);
        else
-               return 0;
+               return false;
 }
index 5f1366234a0dc3e906d085b927b2360b60f05a66..93d4cde0eb3135bbb4334a5ba482437744c09592 100644 (file)
@@ -4999,7 +4999,7 @@ void wlc_2064_vco_cal(struct brcms_phy *pi)
 bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi)
 {
        if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
-               return 0;
+               return false;
        else
                return (LCNPHY_TX_PWR_CTRL_HW ==
                        wlc_lcnphy_get_tx_pwr_ctrl((pi)));
index 2124a17d0bfdda603d34cf03a76cf550d29bf1be..4efdd51af9c8fd72ce99ea00e8bc03f6c86c271a 100644 (file)
@@ -37,6 +37,8 @@
 #define BRCM_CC_43362_CHIP_ID          43362
 #define BRCM_CC_4335_CHIP_ID           0x4335
 #define BRCM_CC_4339_CHIP_ID           0x4339
+#define BRCM_CC_43430_CHIP_ID          43430
+#define BRCM_CC_4345_CHIP_ID           0x4345
 #define BRCM_CC_4354_CHIP_ID           0x4354
 #define BRCM_CC_4356_CHIP_ID           0x4356
 #define BRCM_CC_43566_CHIP_ID          43566
index d242333b7559fca6de21e7f0b458770d149287df..e1fd499930a03a1cb0317c38de42813bea149275 100644 (file)
@@ -183,7 +183,14 @@ struct chipcregs {
        u8 uart1lsr;
        u8 uart1msr;
        u8 uart1scratch;
-       u32 PAD[126];
+       u32 PAD[62];
+
+       /* save/restore, corerev >= 48 */
+       u32 sr_capability;          /* 0x500 */
+       u32 sr_control0;            /* 0x504 */
+       u32 sr_control1;            /* 0x508 */
+       u32 gpio_control;           /* 0x50C */
+       u32 PAD[60];
 
        /* PMU registers (corerev >= 20) */
        u32 pmucontrol; /* 0x600 */
index 964b64ab7fe3a10aed8024f79abe6a23df6afb38..7603546d2de322cf8fb5dd5bf0d5ff3e48d8a6be 100644 (file)
@@ -447,7 +447,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
 }
 
 #ifdef CONFIG_PM
-static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
+static int cw1200_spi_suspend(struct device *dev)
 {
        struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
 
@@ -458,10 +458,8 @@ static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
        return 0;
 }
 
-static int cw1200_spi_resume(struct device *dev)
-{
-       return 0;
-}
+static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
+
 #endif
 
 static struct spi_driver spi_driver = {
@@ -472,8 +470,7 @@ static struct spi_driver spi_driver = {
                .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
 #ifdef CONFIG_PM
-               .suspend        = cw1200_spi_suspend,
-               .resume         = cw1200_spi_resume,
+               .pm             = &cw1200_pm_ops,
 #endif
        },
 };
index a6f22c32a27994000f578c2baff08a2f62042aa0..3811878ab9cd2057ad44785c7f3dad5cb5edcf07 100644 (file)
@@ -708,7 +708,6 @@ struct iwl_priv {
        unsigned long reload_jiffies;
        int reload_count;
        bool ucode_loaded;
-       bool init_ucode_run;            /* Don't run init uCode again */
 
        u8 plcp_delta_threshold;
 
index 5707ba5ce23f31a490c09a87e3d4952d67e680bc..5abd62ed8cb47da0c9dfe21de3592dc2482f6926 100644 (file)
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
                        BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
 
-       if (vif)
-               scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
-
-       IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
-       if (iwlagn_txfifo_flush(priv, scd_queues)) {
-               IWL_ERR(priv, "flush request fail\n");
-               goto done;
+       if (drop) {
+               IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
+                                   scd_queues);
+               if (iwlagn_txfifo_flush(priv, scd_queues)) {
+                       IWL_ERR(priv, "flush request fail\n");
+                       goto done;
+               }
        }
+
        IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+       iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
 done:
        mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
index 32b78a66536db90bbb1e67f10e210df49f6b2d03..3bd7c86e90d9fca5c43c6a95ac7795ec74ddeaf7 100644 (file)
@@ -3153,12 +3153,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        desc += sprintf(buff+desc, "lq type %s\n",
           (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
        if (is_Ht(tbl->lq_type)) {
-               desc += sprintf(buff+desc, " %s",
+               desc += sprintf(buff + desc, " %s",
                   (is_siso(tbl->lq_type)) ? "SISO" :
                   ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
-                  desc += sprintf(buff+desc, " %s",
+               desc += sprintf(buff + desc, " %s",
                   (tbl->is_ht40) ? "40MHz" : "20MHz");
-                  desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
+               desc += sprintf(buff + desc, " %s %s %s\n",
+                  (tbl->is_SGI) ? "SGI" : "",
                   (lq_sta->is_green) ? "GF enabled" : "",
                   (lq_sta->is_agg) ? "AGG on" : "");
        }
index 1e40a12de077237add85776ec6ad3c6fddf30857..275df12a6045044cdee1ddeba1c8840caf491923 100644 (file)
@@ -189,9 +189,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                rate_flags |= RATE_MCS_CCK_MSK;
 
        /* Set up antennas */
-        if (priv->lib->bt_params &&
-            priv->lib->bt_params->advanced_bt_coexist &&
-            priv->bt_full_concurrent) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist &&
+           priv->bt_full_concurrent) {
                /* operated as 1x1 in full concurrency mode */
                priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
                                first_antenna(priv->nvm_data->valid_tx_ant));
index 4dbef7e58c2e3dfba5be41c1f2f4cfe32c04727b..5244e43bfafbc4617720097660ec693f5d30742f 100644 (file)
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
        if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
                return 0;
 
-       if (priv->init_ucode_run)
-               return 0;
-
        iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
                                   calib_complete, ARRAY_SIZE(calib_complete),
                                   iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
         */
        ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
                                        UCODE_CALIB_TIMEOUT);
-       if (!ret)
-               priv->init_ucode_run = true;
 
        goto out;
 
index 0597a9cfd2f60c26f319afa6954322b86d0476b7..36e786f0387bd42593fe3c8ec523831694483bea 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  12
-#define IWL3160_UCODE_API_MAX  12
+#define IWL7260_UCODE_API_MAX  13
+#define IWL3160_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   10
-#define IWL3160_UCODE_API_OK   10
+#define IWL7260_UCODE_API_OK   12
+#define IWL3160_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  10
index d8dfa6da63072650dcf668d1e16d54ffddff0345..ce6321b7d24156269fd553f4409078b1c3fb24df 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  12
+#define IWL8000_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK   10
+#define IWL8000_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN  10
@@ -94,8 +94,8 @@
        IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_8000         10
-#define DEFAULT_NVM_FILE_FAMILY_8000A          "iwl_nvm_8000.bin"
-#define DEFAULT_NVM_FILE_FAMILY_8000           "iwl_nvm_8000B.bin"
+#define DEFAULT_NVM_FILE_FAMILY_8000B          "nvmData-8000B"
+#define DEFAULT_NVM_FILE_FAMILY_8000C          "nvmData-8000C"
 
 /* Max SDIO RX aggregation size of the ADDBA request/response */
 #define MAX_RX_AGG_SIZE_8260_SDIO      28
@@ -177,8 +177,8 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
-       .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
+       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
+       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +192,8 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
-       .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
+       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
+       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
        .bt_shared_single_ant = true,
        .disable_dummy_notification = true,
index 4b190d98a1ec0c3f98427c3efaab9e0162eb80f3..3f33f753ce2f9ce4a81ef1247d05fb517aeec0ec 100644 (file)
@@ -92,9 +92,9 @@ static inline bool iwl_has_secure_boot(u32 hw_rev,
 {
        /* return true only for family 8000 B0 */
        if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
-               return 1;
+               return true;
 
-       return 0;
+       return false;
 }
 
 /*
@@ -228,7 +228,7 @@ struct iwl_pwr_tx_backoff {
 
 /**
  * struct iwl_cfg
- * @name: Offical name of the device
+ * @name: Official name of the device
  * @fw_name_pre: Firmware filename prefix. The api version and extension
  *     (.ucode) will be added to filename before loading from disk. The
  *     filename is constructed as fw_name_pre<api>.ucode.
@@ -303,8 +303,8 @@ struct iwl_cfg {
        bool lp_xtal_workaround;
        const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
        bool no_power_up_nic_in_init;
-       const char *default_nvm_file;
-       const char *default_nvm_file_8000A;
+       const char *default_nvm_file_B_step;
+       const char *default_nvm_file_C_step;
        unsigned int max_rx_agg_size;
        bool disable_dummy_notification;
        unsigned int max_tx_agg_size;
index 6842545535582246cf5369327c7748344ce98a97..9bb36d79c2bd3b6f32e7370db651533eb3bdad5c 100644 (file)
@@ -157,6 +157,7 @@ do {                                                                \
 /* 0x0000F000 - 0x00001000 */
 #define IWL_DL_ASSOC           0x00001000
 #define IWL_DL_DROP            0x00002000
+#define IWL_DL_LAR             0x00004000
 #define IWL_DL_COEX            0x00008000
 /* 0x000F0000 - 0x00010000 */
 #define IWL_DL_FW              0x00010000
@@ -219,5 +220,6 @@ do {                                                                \
 #define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
 #define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
 #define IWL_DEBUG_RPM(p, f, a...)      IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
+#define IWL_DEBUG_LAR(p, f, a...)      IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
 
 #endif
index 141331d41abf28c59059b2e2ce50051a67b5491d..7267152e7dc7705aaa514237c17ed908b9e4919d 100644 (file)
@@ -145,7 +145,7 @@ static struct iwlwifi_opmode_table {
 #define IWL_DEFAULT_SCAN_CHANNELS 40
 
 /*
- * struct fw_sec: Just for the image parsing proccess.
+ * struct fw_sec: Just for the image parsing process.
  * For the fw storage we are using struct fw_desc.
  */
 struct fw_sec {
@@ -241,16 +241,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
         * previous name and uses the new format.
         */
        if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               char rev_step[2] = {
-                       'A' + CSR_HW_REV_STEP(drv->trans->hw_rev), 0
-               };
-
-               /* A-step doesn't have an indication */
-               if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
-                       rev_step[0] = 0;
+               char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
 
                snprintf(drv->firmware_name, sizeof(drv->firmware_name),
-                        "%s%s-%s.ucode", name_pre, rev_step, tag);
+                        "%s%c-%s.ucode", name_pre, rev_step, tag);
        }
 
        IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
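Since the A step no longer gets special treatment, every family-8000 firmware name now carries an explicit step letter. A tiny illustration with made-up prefix and tag values (the real ones come from the cfg and the ucode API tag):

#include <stdio.h>

int main(void)
{
        char fw_name[64];
        char rev_step = 'A' + 1;        /* e.g. B-step silicon */

        /* same format string as above; "iwlwifi-8000" and "13" are
         * placeholders, not necessarily the real prefix/tag */
        snprintf(fw_name, sizeof(fw_name), "%s%c-%s.ucode",
                 "iwlwifi-8000", rev_step, "13");
        printf("%s\n", fw_name);        /* iwlwifi-8000B-13.ucode */
        return 0;
}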
@@ -1014,34 +1008,34 @@ static int validate_sec_sizes(struct iwl_drv *drv,
 
        /* Verify that uCode images will fit in card's SRAM. */
        if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
-                                                       cfg->max_inst_size) {
+           cfg->max_inst_size) {
                IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_REGULAR,
-                                               IWL_UCODE_SECTION_INST));
+                                    IWL_UCODE_SECTION_INST));
                return -1;
        }
 
        if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
-                                                       cfg->max_data_size) {
+           cfg->max_data_size) {
                IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_REGULAR,
-                                               IWL_UCODE_SECTION_DATA));
+                                    IWL_UCODE_SECTION_DATA));
                return -1;
        }
 
-        if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
-                                                       cfg->max_inst_size) {
+       if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
+            cfg->max_inst_size) {
                IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_INIT,
-                                               IWL_UCODE_SECTION_INST));
+                                    IWL_UCODE_SECTION_INST));
                return -1;
        }
 
        if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
-                                                       cfg->max_data_size) {
+           cfg->max_data_size) {
                IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_REGULAR,
-                                               IWL_UCODE_SECTION_DATA));
+                                    IWL_UCODE_SECTION_DATA));
                return -1;
        }
        return 0;
@@ -1108,6 +1102,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        const unsigned int api_max = drv->cfg->ucode_api_max;
        unsigned int api_ok = drv->cfg->ucode_api_ok;
        const unsigned int api_min = drv->cfg->ucode_api_min;
+       size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
        u32 api_ver;
        int i;
        bool load_module = false;
@@ -1227,8 +1222,37 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
+       memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));
+
+       trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
+               sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
+       trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
+       trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
+               sizeof(struct iwl_fw_dbg_trigger_cmd);
+       trigger_tlv_sz[FW_DBG_TRIGGER_MLME] =
+               sizeof(struct iwl_fw_dbg_trigger_mlme);
+       trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
+               sizeof(struct iwl_fw_dbg_trigger_stats);
+       trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
+               sizeof(struct iwl_fw_dbg_trigger_low_rssi);
+       trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
+               sizeof(struct iwl_fw_dbg_trigger_txq_timer);
+       trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
+               sizeof(struct iwl_fw_dbg_trigger_time_event);
+
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
                if (pieces->dbg_trigger_tlv[i]) {
+                       /*
+                        * If the trigger isn't long enough, WARN and exit.
+                        * Someone is trying to debug something and he won't
+                        * be able to catch the bug he is trying to chase.
+                        * We'd better be noisy to be sure he knows what's
+                        * going on.
+                        */
+                       if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
+                                   (trigger_tlv_sz[i] +
+                                    sizeof(struct iwl_fw_dbg_trigger_tlv))))
+                               goto out_free_fw;
                        drv->fw.dbg_trigger_tlv_len[i] =
                                pieces->dbg_trigger_tlv_len[i];
                        drv->fw.dbg_trigger_tlv[i] =
@@ -1319,6 +1343,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                op->name, err);
 #endif
        }
+       kfree(pieces);
        return;
 
  try_again:
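The trigger_tlv_sz table above just records, per trigger type, the minimum type-specific payload; a trigger TLV is only accepted if it carries the generic trigger header plus that payload. Reduced to its essence (sizes and names below are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>

#define TRIGGER_MAX 8   /* placeholder for FW_DBG_TRIGGER_MAX */

/* illustrative minimum payload sizes; the real table uses sizeof() of the
 * per-trigger structs listed above */
static const size_t min_payload[TRIGGER_MAX] = {
        [1] = 16,       /* e.g. a missed-beacons style trigger (made up) */
        [2] = 0,        /* e.g. a trigger with no extra payload          */
};

static bool trigger_len_ok(unsigned int id, size_t tlv_len, size_t header_sz)
{
        if (id >= TRIGGER_MAX)
                return false;
        /* a TLV must carry the generic trigger header plus its payload */
        return tlv_len >= header_sz + min_payload[id];
}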
@@ -1546,6 +1571,10 @@ module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
                   bool, S_IRUGO);
 MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
 
+module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
+                  bool, S_IRUGO);
+MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
+
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
                   bool, S_IRUGO | S_IWUSR);
 #ifdef CONFIG_IWLWIFI_UAPSD
index adf522c756e61dbbe146fe463dc39241edb3762a..cda746b33db1f60e228cde9c90f5e040847cc0ae 100644 (file)
@@ -68,7 +68,7 @@
 
 /* for all modules */
 #define DRV_NAME        "iwlwifi"
-#define DRV_COPYRIGHT  "Copyright(c) 2003- 2014 Intel Corporation"
+#define DRV_COPYRIGHT  "Copyright(c) 2003- 2015 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
 /* radio config bits (actual values from NVM definition) */
@@ -123,7 +123,7 @@ struct iwl_cfg;
  * starts the driver: fetches the firmware. This should be called by bus
  * specific system flows implementations. For example, the bus specific probe
  * function should do bus related operations only, and then call to this
- * function. It returns the driver object or %NULL if an error occured.
+ * function. It returns the driver object or %NULL if an error occurred.
  */
 struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
                              const struct iwl_cfg *cfg);
index f0548b8a64b072a1a58b80e6d6d5cc7b9d00df59..5234a0bf11e4e3286b740c22518f4a039e224e94 100644 (file)
@@ -94,6 +94,7 @@ struct iwl_nvm_data {
        u32 nvm_version;
        s8 max_tx_pwr_half_dbm;
 
+       bool lar_enabled;
        struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
        struct ieee80211_channel channels[];
 };
index 25d0105741dbee2b1a292d10c1b6cc56ce3f41d5..219ca8acca6281f02abb2c44abc066e730360848 100644 (file)
@@ -248,7 +248,7 @@ static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
        otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
        if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
                /* stop in this case */
-               /* set the uncorrectable OTP ECC bit for acknowledgement */
+               /* set the uncorrectable OTP ECC bit for acknowledgment */
                iwl_set_bit(trans, CSR_OTP_GP_REG,
                            CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
                IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
@@ -256,7 +256,7 @@ static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
        }
        if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
                /* continue in this case */
-               /* set the correctable OTP ECC bit for acknowledgement */
+               /* set the correctable OTP ECC bit for acknowledgment */
                iwl_set_bit(trans, CSR_OTP_GP_REG,
                            CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
                IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
index 1f7f15eb86da208df591ae8e7ca27e52c5921acf..d45dc021cda2c0715b8d7e740ff90b46589ae141 100644 (file)
@@ -445,7 +445,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_LOW_WATERMARK 8
 
 /**
- * struct iwl_rb_status - reseve buffer status
+ * struct iwl_rb_status - reserve buffer status
  *     host memory mapped FH registers
  * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
  * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
index 37b38a585dd182f8fa1c44256f4a16ecc008d3df..251bf8dc4a12133b4ae77757170436c67219afc9 100644 (file)
@@ -183,7 +183,7 @@ struct iwl_fw_error_dump_info {
  * struct iwl_fw_error_dump_fw_mon - FW monitor data
  * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
  * @fw_mon_base_ptr: base pointer of the data
- * @fw_mon_cycle_cnt: number of wrap arounds
+ * @fw_mon_cycle_cnt: number of wraparounds
  * @reserved: for future use
  * @data: captured data
  */
@@ -246,10 +246,14 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
  * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
  * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
  *     command response or a notification.
- * @FW_DB_TRIGGER_RESERVED: reserved
+ * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
  * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
  * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
  *     goes below a threshold.
+ * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
+ *     detection.
+ * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon events related to
+ *     time events.
  */
 enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_INVALID = 0,
@@ -258,9 +262,11 @@ enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_MISSED_BEACONS,
        FW_DBG_TRIGGER_CHANNEL_SWITCH,
        FW_DBG_TRIGGER_FW_NOTIF,
-       FW_DB_TRIGGER_RESERVED,
+       FW_DBG_TRIGGER_MLME,
        FW_DBG_TRIGGER_STATS,
        FW_DBG_TRIGGER_RSSI,
+       FW_DBG_TRIGGER_TXQ_TIMERS,
+       FW_DBG_TRIGGER_TIME_EVENT,
 
        /* must be last */
        FW_DBG_TRIGGER_MAX,
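
As a hedged aside on how this enum is normally consumed (the helper below is illustrative, not the driver's lookup code), trigger ids typically index a per-trigger table and are therefore range-checked against FW_DBG_TRIGGER_MAX first:

/* Illustration only: validate a trigger id before indexing a trigger table. */
static bool fw_dbg_trigger_id_valid(enum iwl_fw_dbg_trigger id)
{
        return id > FW_DBG_TRIGGER_INVALID && id < FW_DBG_TRIGGER_MAX;
}
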
index 5ea381861d5d2c46c1dce922c40967681dc60185..bfdf3faa6c470dafbd9a66672b2f55e38b0872bc 100644 (file)
@@ -191,7 +191,7 @@ struct iwl_ucode_capa {
  * enum iwl_ucode_tlv_flag - ucode API flags
  * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
  *     was a separate TLV but moved here to save space.
- * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
  *     treats good CRC threshold as a boolean
  * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
@@ -240,10 +240,9 @@ enum iwl_ucode_tlv_flag {
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
- * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
- * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *     regardless of the band or the number of the probes. FW will calculate
@@ -258,9 +257,8 @@ enum iwl_ucode_tlv_flag {
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
-       IWL_UCODE_TLV_API_DISABLE_STA_TX        = BIT(5),
-       IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
+       IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = BIT(9),
        IWL_UCODE_TLV_API_HDC_PHASE_0           = BIT(10),
        IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
        IWL_UCODE_TLV_API_SCD_CFG               = BIT(15),
@@ -292,6 +290,11 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ *     sources for the MCC. This TLV bit is a future replacement for
+ *     IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ *     is supported.
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
@@ -308,6 +311,8 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = BIT(22),
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = BIT(28),
+       IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = BIT(29),
+       IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = BIT(30),
 };
 
 /* The default calibrate table size if not specified by firmware file */
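
A hedged sketch of how these TLV bits are typically tested once the fw file has been parsed; passing the first api/capa words as plain u32 values is an assumption made purely for illustration:

/* Illustration only: per the comment above, multi-source LAR is available
 * when either the new capability bit or the older API bit is advertised. */
static bool fw_supports_multi_source_lar(u32 api_word0, u32 capa_word0)
{
        return (capa_word0 & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC) ||
               (api_word0 & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE);
}
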
@@ -573,6 +578,84 @@ struct iwl_fw_dbg_trigger_low_rssi {
        __le32 rssi;
 } __packed;
 
+/**
+ * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events
+ * @stop_auth_denied: number of denied authentication to collect
+ * @stop_auth_timeout: number of authentication timeout to collect
+ * @stop_rx_deauth: number of Rx deauth to collect
+ * @stop_tx_deauth: number of Tx deauth to collect
+ * @stop_assoc_denied: number of denied association to collect
+ * @stop_assoc_timeout: number of association timeout to collect
+ * @stop_connection_loss: number of connection loss to collect
+ * @start_auth_denied: number of denied authentication to start recording
+ * @start_auth_timeout: number of authentication timeout to start recording
+ * @start_rx_deauth: number of Rx deauth to start recording
+ * @start_tx_deauth: number of Tx deauth to start recording
+ * @start_assoc_denied: number of denied association to start recording
+ * @start_assoc_timeout: number of association timeout to start recording
+ * @start_connection_loss: number of connection loss to start recording
+ */
+struct iwl_fw_dbg_trigger_mlme {
+       u8 stop_auth_denied;
+       u8 stop_auth_timeout;
+       u8 stop_rx_deauth;
+       u8 stop_tx_deauth;
+
+       u8 stop_assoc_denied;
+       u8 stop_assoc_timeout;
+       u8 stop_connection_loss;
+       u8 reserved;
+
+       u8 start_auth_denied;
+       u8 start_auth_timeout;
+       u8 start_rx_deauth;
+       u8 start_tx_deauth;
+
+       u8 start_assoc_denied;
+       u8 start_assoc_timeout;
+       u8 start_connection_loss;
+       u8 reserved2;
+} __packed;
+
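
A minimal sketch of the stop_* threshold semantics documented above (hypothetical helper; treating a zero threshold as "ignore this event type" is an assumption):

static bool mlme_trig_should_stop(u8 events_seen, u8 stop_threshold)
{
        /* stop collecting once the configured number of events was seen */
        return stop_threshold && events_seen >= stop_threshold;
}
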
+/**
+ * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
+ * @command_queue: timeout for the command queue in ms
+ * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
+ * @softap: timeout for the queues of a softAP in ms
+ * @p2p_go: timeout for the queues of a P2P GO in ms
+ * @p2p_client: timeout for the queues of a P2P client in ms
+ * @p2p_device: timeout for the queues of a P2P device in ms
+ * @ibss: timeout for the queues of an IBSS in ms
+ * @tdls: timeout for the queues of a TDLS station in ms
+ */
+struct iwl_fw_dbg_trigger_txq_timer {
+       __le32 command_queue;
+       __le32 bss;
+       __le32 softap;
+       __le32 p2p_go;
+       __le32 p2p_client;
+       __le32 p2p_device;
+       __le32 ibss;
+       __le32 tdls;
+       __le32 reserved[4];
+} __packed;
+
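
The timeouts above are all in milliseconds; a hedged sketch of how one would typically be turned into a timer deadline inside the transport (the helper name is made up):

/* Illustration only: convert a per-interface timeout to a jiffies deadline. */
static unsigned long txq_timer_deadline(__le32 timeout_ms)
{
        return jiffies + msecs_to_jiffies(le32_to_cpu(timeout_ms));
}
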
+/**
+ * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger
+ * @time_events: a list of tuples <id, action_bitmap>. The driver will issue a
+ *     trigger each time it receives a time event notification that relates to
+ *     a time event id with one of the actions in the bitmap, and for which
+ *     BIT(notif->status) is set in status_bitmap.
+ *
+ */
+struct iwl_fw_dbg_trigger_time_event {
+       struct {
+               __le32 id;
+               __le32 action_bitmap;
+               __le32 status_bitmap;
+       } __packed time_events[16];
+} __packed;
+
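
A hedged sketch of the matching rule spelled out for @time_events above; the helper and the way the notification fields are passed in are assumptions:

static bool time_event_trig_matches(const struct iwl_fw_dbg_trigger_time_event *trig,
                                    u32 id, u32 action_mask, u32 status)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(trig->time_events); i++) {
                if (le32_to_cpu(trig->time_events[i].id) != id)
                        continue;
                /* at least one notified action must be of interest */
                if (!(le32_to_cpu(trig->time_events[i].action_bitmap) &
                      action_mask))
                        continue;
                /* and BIT(status) must be set in the tuple's status_bitmap */
                if (le32_to_cpu(trig->time_events[i].status_bitmap) &
                    BIT(status))
                        return true;
        }
        return false;
}
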
 /**
  * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
  * @id: conf id
index 03250a45272eba7185851d74bc90d6a6863d664d..27c66e477833f7747d621fabe489b5d687a3120d 100644 (file)
@@ -186,21 +186,16 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
 
 void iwl_force_nmi(struct iwl_trans *trans)
 {
-       /*
-        * In HW previous to the 8000 HW family, and in the 8000 HW family
-        * itself when the revision step==0, the DEVICE_SET_NMI_REG is used
-        * to force an NMI. Otherwise, a different register -
-        * DEVICE_SET_NMI_8000B_REG - is used.
-        */
-       if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
-           (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) {
+       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
                iwl_write_prph(trans, DEVICE_SET_NMI_REG,
                               DEVICE_SET_NMI_VAL_DRV);
                iwl_write_prph(trans, DEVICE_SET_NMI_REG,
                               DEVICE_SET_NMI_VAL_HW);
        } else {
-               iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
-                              DEVICE_SET_NMI_8000B_VAL);
+               iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
+                              DEVICE_SET_NMI_8000_VAL);
+               iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+                              DEVICE_SET_NMI_VAL_DRV);
        }
 }
 IWL_EXPORT_SYMBOL(iwl_force_nmi);
index e8eabd21ccfefa1083c36bbcbe1b26b7b858d3ff..ac2b90df841316134e85c6cc8fb5b205eb679ee9 100644 (file)
@@ -103,6 +103,7 @@ enum iwl_disable_11n {
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
  * @d0i3_disable: disable d0i3, default = 1,
+ * @lar_disable: disable LAR (regulatory), default = 0
  * @fw_monitor: allow to use firmware monitor
  */
 struct iwl_mod_params {
@@ -121,6 +122,7 @@ struct iwl_mod_params {
        char *nvm_file;
        bool uapsd_disable;
        bool d0i3_disable;
+       bool lar_disable;
        bool fw_monitor;
 };
 
index c74f1a4edf2367ba558dc97c9f744b49b91ba42f..83903a5025c2e69779554e7bcf980aff48b3d080 100644 (file)
@@ -99,12 +99,15 @@ enum family_8000_nvm_offsets {
        /* NVM SW-Section offset (in words) definitions */
        NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
        NVM_VERSION_FAMILY_8000 = 0,
-       RADIO_CFG_FAMILY_8000 = 2,
-       SKU_FAMILY_8000 = 4,
-       N_HW_ADDRS_FAMILY_8000 = 5,
+       RADIO_CFG_FAMILY_8000 = 0,
+       SKU_FAMILY_8000 = 2,
+       N_HW_ADDRS_FAMILY_8000 = 3,
 
        /* NVM REGULATORY -Section offset (in words) definitions */
        NVM_CHANNELS_FAMILY_8000 = 0,
+       NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
+       NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
+       NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
 
        /* NVM calibration section offset (in words) definitions */
        NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
@@ -146,7 +149,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
 #define NUM_2GHZ_CHANNELS_FAMILY_8000  14
 #define FIRST_2GHZ_HT_MINUS            5
 #define LAST_2GHZ_HT_PLUS              9
-#define LAST_5GHZ_HT                   161
+#define LAST_5GHZ_HT                   165
+#define LAST_5GHZ_HT_FAMILY_8000       181
+#define N_HW_ADDR_MASK                 0xF
 
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -201,9 +206,57 @@ enum iwl_nvm_channel_flags {
 #define CHECK_AND_PRINT_I(x)   \
        ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
 
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+                                u16 nvm_flags, const struct iwl_cfg *cfg)
+{
+       u32 flags = IEEE80211_CHAN_NO_HT40;
+       u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+       if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+       if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if (ch_num <= LAST_2GHZ_HT_PLUS)
+                       flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+               if (ch_num >= FIRST_2GHZ_HT_MINUS)
+                       flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+       } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                       flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+               else
+                       flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+       }
+       if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+               flags |= IEEE80211_CHAN_NO_80MHZ;
+       if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+               flags |= IEEE80211_CHAN_NO_160MHZ;
+
+       if (!(nvm_flags & NVM_CHANNEL_IBSS))
+               flags |= IEEE80211_CHAN_NO_IR;
+
+       if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+               flags |= IEEE80211_CHAN_NO_IR;
+
+       if (nvm_flags & NVM_CHANNEL_RADAR)
+               flags |= IEEE80211_CHAN_RADAR;
+
+       if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+               flags |= IEEE80211_CHAN_INDOOR_ONLY;
+
+       /* Set the GO concurrent flag only in case that NO_IR is set.
+        * Otherwise it is meaningless
+        */
+       if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+           (flags & IEEE80211_CHAN_NO_IR))
+               flags |= IEEE80211_CHAN_GO_CONCURRENT;
+
+       return flags;
+}
+
 static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                struct iwl_nvm_data *data,
-                               const __le16 * const nvm_ch_flags)
+                               const __le16 * const nvm_ch_flags,
+                               bool lar_supported)
 {
        int ch_idx;
        int n_channels = 0;
@@ -228,9 +281,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                if (ch_idx >= num_2ghz_channels &&
                    !data->sku_cap_band_52GHz_enable)
-                       ch_flags &= ~NVM_CHANNEL_VALID;
+                       continue;
 
-               if (!(ch_flags & NVM_CHANNEL_VALID)) {
+               if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
+                       /*
+                        * Channels might become valid later if LAR is
+                        * supported, hence we still want to add them to the
+                        * list of supported channels reported to cfg80211.
+                        */
                        IWL_DEBUG_EEPROM(dev,
                                         "Ch. %d Flags %x [%sGHz] - No traffic\n",
                                         nvm_chan[ch_idx],
@@ -250,45 +308,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                        ieee80211_channel_to_frequency(
                                channel->hw_value, channel->band);
 
-               /* TODO: Need to be dependent to the NVM */
-               channel->flags = IEEE80211_CHAN_NO_HT40;
-               if (ch_idx < num_2ghz_channels &&
-                   (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
-                       if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
-               } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
-                          (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if ((ch_idx - num_2ghz_channels) % 2 == 0)
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
-                       else
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
-               }
-               if (!(ch_flags & NVM_CHANNEL_80MHZ))
-                       channel->flags |= IEEE80211_CHAN_NO_80MHZ;
-               if (!(ch_flags & NVM_CHANNEL_160MHZ))
-                       channel->flags |= IEEE80211_CHAN_NO_160MHZ;
-
-               if (!(ch_flags & NVM_CHANNEL_IBSS))
-                       channel->flags |= IEEE80211_CHAN_NO_IR;
-
-               if (!(ch_flags & NVM_CHANNEL_ACTIVE))
-                       channel->flags |= IEEE80211_CHAN_NO_IR;
-
-               if (ch_flags & NVM_CHANNEL_RADAR)
-                       channel->flags |= IEEE80211_CHAN_RADAR;
-
-               if (ch_flags & NVM_CHANNEL_INDOOR_ONLY)
-                       channel->flags |= IEEE80211_CHAN_INDOOR_ONLY;
-
-               /* Set the GO concurrent flag only in case that NO_IR is set.
-                * Otherwise it is meaningless
-                */
-               if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) &&
-                   (channel->flags & IEEE80211_CHAN_NO_IR))
-                       channel->flags |= IEEE80211_CHAN_GO_CONCURRENT;
-
                /* Initialize regulatory-based run-time data */
 
                /*
@@ -297,6 +316,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                 */
                channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+
+               /* don't put limitations in case we're using LAR */
+               if (!lar_supported)
+                       channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
+                                                              ch_idx, is_5ghz,
+                                                              ch_flags, cfg);
+               else
+                       channel->flags = 0;
+
                IWL_DEBUG_EEPROM(dev,
                                 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
                                 channel->hw_value,
@@ -370,8 +398,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 
 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                            struct iwl_nvm_data *data,
-                           const __le16 *ch_section, bool enable_vht,
-                           u8 tx_chains, u8 rx_chains)
+                           const __le16 *ch_section,
+                           u8 tx_chains, u8 rx_chains, bool lar_supported)
 {
        int n_channels;
        int n_used = 0;
@@ -380,11 +408,12 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                n_channels = iwl_init_channel_map(
                                dev, cfg, data,
-                               &ch_section[NVM_CHANNELS]);
+                               &ch_section[NVM_CHANNELS], lar_supported);
        else
                n_channels = iwl_init_channel_map(
                                dev, cfg, data,
-                               &ch_section[NVM_CHANNELS_FAMILY_8000]);
+                               &ch_section[NVM_CHANNELS_FAMILY_8000],
+                               lar_supported);
 
        sband = &data->bands[IEEE80211_BAND_2GHZ];
        sband->band = IEEE80211_BAND_2GHZ;
@@ -403,7 +432,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                                          IEEE80211_BAND_5GHZ);
        iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
                             tx_chains, rx_chains);
-       if (enable_vht)
+       if (data->sku_cap_11ac_enable)
                iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
                                      tx_chains, rx_chains);
 
@@ -412,17 +441,16 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                            n_used, n_channels);
 }
 
-static int iwl_get_sku(const struct iwl_cfg *cfg,
-                      const __le16 *nvm_sw)
+static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+                      const __le16 *phy_sku)
 {
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + SKU);
-       else
-               return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
+
+       return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
 }
 
-static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
-                              const __le16 *nvm_sw)
+static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + NVM_VERSION);
@@ -431,24 +459,26 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
                                               NVM_VERSION_FAMILY_8000));
 }
 
-static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
-                            const __le16 *nvm_sw)
+static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+                            const __le16 *phy_sku)
 {
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
-       else
-               return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+
+       return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+
 }
 
-#define N_HW_ADDRS_MASK_FAMILY_8000    0xF
-static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
-                             const __le16 *nvm_sw)
+static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
+       int n_hw_addr;
+
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + N_HW_ADDRS);
-       else
-               return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
-                      & N_HW_ADDRS_MASK_FAMILY_8000;
+
+       n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+
+       return n_hw_addr & N_HW_ADDR_MASK;
 }
 
 static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
@@ -491,7 +521,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                                           const struct iwl_cfg *cfg,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
-                                          const __le16 *nvm_hw)
+                                          const __le16 *nvm_hw,
+                                          u32 mac_addr0, u32 mac_addr1)
 {
        const u8 *hw_addr;
 
@@ -515,48 +546,17 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        }
 
        if (nvm_hw) {
-               /* read the MAC address from OTP */
-               if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) {
-                       /* read the mac address from the WFPM location */
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR0_WFPM_FAMILY_8000);
-                       data->hw_addr[0] = hw_addr[3];
-                       data->hw_addr[1] = hw_addr[2];
-                       data->hw_addr[2] = hw_addr[1];
-                       data->hw_addr[3] = hw_addr[0];
-
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR1_WFPM_FAMILY_8000);
-                       data->hw_addr[4] = hw_addr[1];
-                       data->hw_addr[5] = hw_addr[0];
-               } else if ((data->nvm_version >= 0xE08) &&
-                          (data->nvm_version < 0xE0B)) {
-                       /* read "reverse order"  from the PCIe location */
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR0_PCIE_FAMILY_8000);
-                       data->hw_addr[5] = hw_addr[2];
-                       data->hw_addr[4] = hw_addr[1];
-                       data->hw_addr[3] = hw_addr[0];
-
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR1_PCIE_FAMILY_8000);
-                       data->hw_addr[2] = hw_addr[3];
-                       data->hw_addr[1] = hw_addr[2];
-                       data->hw_addr[0] = hw_addr[1];
-               } else {
-                       /* read from the PCIe location */
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR0_PCIE_FAMILY_8000);
-                       data->hw_addr[5] = hw_addr[0];
-                       data->hw_addr[4] = hw_addr[1];
-                       data->hw_addr[3] = hw_addr[2];
-
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR1_PCIE_FAMILY_8000);
-                       data->hw_addr[2] = hw_addr[1];
-                       data->hw_addr[1] = hw_addr[2];
-                       data->hw_addr[0] = hw_addr[3];
-               }
+               /* read the MAC address from HW registers */
+               hw_addr = (const u8 *)&mac_addr0;
+               data->hw_addr[0] = hw_addr[3];
+               data->hw_addr[1] = hw_addr[2];
+               data->hw_addr[2] = hw_addr[1];
+               data->hw_addr[3] = hw_addr[0];
+
+               hw_addr = (const u8 *)&mac_addr1;
+               data->hw_addr[4] = hw_addr[1];
+               data->hw_addr[5] = hw_addr[0];
+
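
A self-contained illustration of the byte re-ordering done above on the mac_addr0/mac_addr1 words (presumably read from the WFMP_MAC_ADDR_0/1 registers defined later in this series), assuming a little-endian host; the values below are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mac_addr0 = 0xddccbbaa;        /* pretend register word 0 */
        uint32_t mac_addr1 = 0x0000ffee;        /* pretend register word 1 */
        const uint8_t *p0 = (const uint8_t *)&mac_addr0;
        const uint8_t *p1 = (const uint8_t *)&mac_addr1;
        uint8_t hw[6] = { p0[3], p0[2], p0[1], p0[0], p1[1], p1[0] };

        /* prints dd:cc:bb:aa:ff:ee on a little-endian machine */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]);
        return 0;
}
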
                if (!is_valid_ether_addr(data->hw_addr))
                        IWL_ERR_DEV(dev,
                                    "mac address from hw section is not valid\n");
@@ -571,11 +571,14 @@ struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
-                  const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
+                  const __le16 *mac_override, const __le16 *phy_sku,
+                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
+                  u32 mac_addr0, u32 mac_addr1)
 {
        struct iwl_nvm_data *data;
        u32 sku;
        u32 radio_cfg;
+       u16 lar_config;
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                data = kzalloc(sizeof(*data) +
@@ -592,20 +595,21 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 
        data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
 
-       radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
+       radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
        iwl_set_radio_cfg(cfg, data, radio_cfg);
        if (data->valid_tx_ant)
                tx_chains &= data->valid_tx_ant;
        if (data->valid_rx_ant)
                rx_chains &= data->valid_rx_ant;
 
-       sku = iwl_get_sku(cfg, nvm_sw);
+       sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
        data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
-       data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
        if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
                data->sku_cap_11n_enable = false;
+       data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
+                                   (sku & NVM_SKU_CAP_11AC_ENABLE);
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
@@ -626,16 +630,23 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                iwl_set_hw_address(cfg, data, nvm_hw);
 
                iwl_init_sbands(dev, cfg, data, nvm_sw,
-                               sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
-                               rx_chains);
+                               tx_chains, rx_chains, lar_fw_supported);
        } else {
+               u16 lar_offset = data->nvm_version < 0xE39 ?
+                                NVM_LAR_OFFSET_FAMILY_8000_OLD :
+                                NVM_LAR_OFFSET_FAMILY_8000;
+
+               lar_config = le16_to_cpup(regulatory + lar_offset);
+               data->lar_enabled = !!(lar_config &
+                                      NVM_LAR_ENABLED_FAMILY_8000);
+
                /* MAC address in family 8000 */
                iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
-                                              nvm_hw);
+                                              nvm_hw, mac_addr0, mac_addr1);
 
                iwl_init_sbands(dev, cfg, data, regulatory,
-                               sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
-                               rx_chains);
+                               tx_chains, rx_chains,
+                               lar_fw_supported && data->lar_enabled);
        }
 
        data->calib_version = 255;
@@ -643,3 +654,164 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
        return data;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
+
+static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
+                                      int ch_idx, u16 nvm_flags,
+                                      const struct iwl_cfg *cfg)
+{
+       u32 flags = NL80211_RRF_NO_HT40;
+       u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+       if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+       if (ch_idx < NUM_2GHZ_CHANNELS &&
+           (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
+                       flags &= ~NL80211_RRF_NO_HT40PLUS;
+               if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+                       flags &= ~NL80211_RRF_NO_HT40MINUS;
+       } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
+                  (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                       flags &= ~NL80211_RRF_NO_HT40PLUS;
+               else
+                       flags &= ~NL80211_RRF_NO_HT40MINUS;
+       }
+
+       if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+               flags |= NL80211_RRF_NO_80MHZ;
+       if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+               flags |= NL80211_RRF_NO_160MHZ;
+
+       if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+               flags |= NL80211_RRF_NO_IR;
+
+       if (nvm_flags & NVM_CHANNEL_RADAR)
+               flags |= NL80211_RRF_DFS;
+
+       if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+               flags |= NL80211_RRF_NO_OUTDOOR;
+
+       /* Set the GO concurrent flag only in case that NO_IR is set.
+        * Otherwise it is meaningless
+        */
+       if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+           (flags & NL80211_RRF_NO_IR))
+               flags |= NL80211_RRF_GO_CONCURRENT;
+
+       return flags;
+}
+
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+                      int num_of_ch, __le32 *channels, u16 fw_mcc)
+{
+       int ch_idx;
+       u16 ch_flags, prev_ch_flags = 0;
+       const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
+                            iwl_nvm_channels_family_8000 : iwl_nvm_channels;
+       struct ieee80211_regdomain *regd;
+       int size_of_regd;
+       struct ieee80211_reg_rule *rule;
+       enum ieee80211_band band;
+       int center_freq, prev_center_freq = 0;
+       int valid_rules = 0;
+       bool new_rule;
+       int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
+                        IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
+
+       if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
+               return ERR_PTR(-EINVAL);
+
+       if (WARN_ON(num_of_ch > max_num_ch))
+               num_of_ch = max_num_ch;
+
+       IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
+                     num_of_ch);
+
+       /* build a regdomain rule for every valid channel */
+       size_of_regd =
+               sizeof(struct ieee80211_regdomain) +
+               num_of_ch * sizeof(struct ieee80211_reg_rule);
+
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
+       if (!regd)
+               return ERR_PTR(-ENOMEM);
+
+       for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+               ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
+               band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+                      IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+               center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
+                                                            band);
+               new_rule = false;
+
+               if (!(ch_flags & NVM_CHANNEL_VALID)) {
+                       IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+                                     "Ch. %d Flags %x [%sGHz] - No traffic\n",
+                                     nvm_chan[ch_idx],
+                                     ch_flags,
+                                     (ch_idx >= NUM_2GHZ_CHANNELS) ?
+                                     "5.2" : "2.4");
+                       continue;
+               }
+
+               /* we can't continue the same rule */
+               if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+                   center_freq - prev_center_freq > 20) {
+                       valid_rules++;
+                       new_rule = true;
+               }
+
+               rule = &regd->reg_rules[valid_rules - 1];
+
+               if (new_rule)
+                       rule->freq_range.start_freq_khz =
+                                               MHZ_TO_KHZ(center_freq - 10);
+
+               rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
+
+               /* this doesn't matter - not used by FW */
+               rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
+               rule->power_rule.max_eirp =
+                       DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
+
+               rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+                                                         ch_flags, cfg);
+
+               /* rely on auto-calculation to merge BW of contiguous chans */
+               rule->flags |= NL80211_RRF_AUTO_BW;
+               rule->freq_range.max_bandwidth_khz = 0;
+
+               prev_ch_flags = ch_flags;
+               prev_center_freq = center_freq;
+
+               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+                             center_freq,
+                             band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+                             CHECK_AND_PRINT_I(VALID),
+                             CHECK_AND_PRINT_I(ACTIVE),
+                             CHECK_AND_PRINT_I(RADAR),
+                             CHECK_AND_PRINT_I(WIDE),
+                             CHECK_AND_PRINT_I(40MHZ),
+                             CHECK_AND_PRINT_I(80MHZ),
+                             CHECK_AND_PRINT_I(160MHZ),
+                             CHECK_AND_PRINT_I(INDOOR_ONLY),
+                             CHECK_AND_PRINT_I(GO_CONCURRENT),
+                             ch_flags,
+                             ((ch_flags & NVM_CHANNEL_ACTIVE) &&
+                              !(ch_flags & NVM_CHANNEL_RADAR))
+                                        ? "" : "not ");
+       }
+
+       regd->n_reg_rules = valid_rules;
+
+       /* set alpha2 from FW. */
+       regd->alpha2[0] = fw_mcc >> 8;
+       regd->alpha2[1] = fw_mcc & 0xff;
+
+       return regd;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
index c9c45a39d212c2516433bdce81c74373460383ab..822ba52e0e5af537e42262ee4106af53d0b37f04 100644 (file)
@@ -62,6 +62,7 @@
 #ifndef __iwl_nvm_parse_h__
 #define __iwl_nvm_parse_h__
 
+#include <net/cfg80211.h>
 #include "iwl-eeprom-parse.h"
 
 /**
@@ -76,6 +77,21 @@ struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
-                  const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
+                  const __le16 *mac_override, const __le16 *phy_sku,
+                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
+                  u32 mac_addr0, u32 mac_addr1);
+
+/**
+ * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
+ *
+ * This function parses the regulatory channel data received as a
+ * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
+ * to be fed into the regulatory core. An ERR_PTR is returned on error.
+ * If not given to the regulatory core, the user is responsible for freeing
+ * the regdomain returned here with kfree.
+ */
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+                      int num_of_ch, __le32 *channels, u16 fw_mcc);
 
 #endif /* __iwl_nvm_parse_h__ */
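
A hedged usage sketch for the function documented above; the wrapper name, and the assumption that the wiphy is registered as self-managed so regulatory_set_wiphy_regd() applies, are illustrative only:

static int apply_fw_mcc_regdomain(struct wiphy *wiphy, struct device *dev,
                                  const struct iwl_cfg *cfg,
                                  int n_ch, __le32 *channels, u16 mcc)
{
        struct ieee80211_regdomain *regd;
        int ret;

        regd = iwl_parse_nvm_mcc_info(dev, cfg, n_ch, channels, mcc);
        if (IS_ERR(regd))
                return PTR_ERR(regd);

        ret = regulatory_set_wiphy_regd(wiphy, regd);
        kfree(regd);    /* assumption: cfg80211 keeps its own copy of the rules */
        return ret;
}
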
index 17de6d46222ae7c803605017c066453d73076f01..ce1cdd7604e8a156cf7356466b9232bdc176d9db 100644 (file)
@@ -94,7 +94,7 @@ struct iwl_cfg;
  * The operational mode has a very simple life cycle.
  *
  *     1) The driver layer (iwl-drv.c) chooses the op_mode based on the
- *        capabilities advertized by the fw file (in TLV format).
+ *        capabilities advertised by the fw file (in TLV format).
  *     2) The driver layer starts the op_mode (ops->start)
  *     3) The op_mode registers mac80211
  *     4) The op_mode is governed by mac80211
@@ -116,7 +116,7 @@ struct iwl_cfg;
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *     HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
  *     but the higher layers need to know about it (in particular mac80211 to
 *     be able to call the right NAPI RX functions); this function is needed
  *     to eventually call netif_napi_add() with higher layer involvement.
index e893c6eb260cd4866b7c7d03c1437b9d479ad9de..a105455b6a2469c2afb07e1b04e4949d8e1184f9 100644 (file)
@@ -125,7 +125,7 @@ struct iwl_phy_db_chg_txp {
 } __packed;
 
 /*
- * phy db - Receieve phy db chunk after calibrations
+ * phy db - Receive phy db chunk after calibrations
  */
 struct iwl_calib_res_notif_phy_db {
        __le16 type;
index 6095088b88d91c0fb9c58b82e74cc7e969e589a3..88a57e6e232ff318dea917d0856da781662bc5eb 100644 (file)
 #define DEVICE_SET_NMI_REG 0x00a01c30
 #define DEVICE_SET_NMI_VAL_HW BIT(0)
 #define DEVICE_SET_NMI_VAL_DRV BIT(7)
-#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
-#define DEVICE_SET_NMI_8000B_VAL 0x1000000
+#define DEVICE_SET_NMI_8000_REG 0x00a01c24
+#define DEVICE_SET_NMI_8000_VAL 0x1000000
 
 /* Shared registers (0x0..0x3ff, via target indirect or periphery */
 #define SHR_BASE       0x00a10000
 #define OSC_CLK                                (0xa04068)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
 
-/* SECURE boot registers */
-#define LMPM_SECURE_BOOT_CONFIG_ADDR   (0x100)
-enum secure_boot_config_reg {
-       LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
-       LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ       = 0x00000002,
-};
-
-#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0   (0xA01E30)
-#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR      (0x1E30)
-#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR      (0x1E34)
-enum secure_boot_status_reg {
-       LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS         = 0x00000001,
-       LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED      = 0x00000002,
-       LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS        = 0x00000004,
-       LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL           = 0x00000008,
-       LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL      = 0x00000010,
-       LMPM_SECURE_BOOT_STATUS_SUCCESS                 = 0x00000003,
-};
-
 #define FH_UCODE_LOAD_STATUS           (0x1AF0)
 #define CSR_UCODE_LOAD_STATUS_ADDR     (0x1E70)
 enum secure_load_status_reg {
@@ -334,8 +315,6 @@ enum secure_load_status_reg {
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE         (0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE         (0x420400)
 
-#define LMPM_SECURE_TIME_OUT   (100) /* 10 micro */
-
 /* Rx FIFO */
 #define RXF_SIZE_ADDR                  (0xa00c88)
 #define RXF_RD_D_SPACE                 (0xa00c40)
@@ -371,6 +350,33 @@ enum secure_load_status_reg {
 
 #define DBGC_IN_SAMPLE                 (0xa03c00)
 
+/* enable the ID buf for read */
+#define WFPM_PS_CTL_CLR                        0xA0300C
+#define WFMP_MAC_ADDR_0                        0xA03080
+#define WFMP_MAC_ADDR_1                        0xA03084
+#define LMPM_PMG_EN                    0xA01CEC
+#define RADIO_REG_SYS_MANUAL_DFT_0     0xAD4078
+#define RFIC_REG_RD                    0xAD0470
+#define WFPM_CTRL_REG                  0xA03030
+enum {
+       ENABLE_WFPM = BIT(31),
+       WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK       = 0x80000000,
+};
+
+#define AUX_MISC_REG                   0xA200B0
+enum {
+       HW_STEP_LOCATION_BITS = 24,
+};
+
+#define AUX_MISC_MASTER1_EN            0xA20818
+enum aux_misc_master1_en {
+       AUX_MISC_MASTER1_EN_SBE_MSK     = 0x1,
+};
+
+#define AUX_MISC_MASTER1_SMPHR_STATUS  0xA20800
+#define RSA_ENABLE                     0xA24B08
+#define PREG_AUX_BUS_WPROT_0           0xA04CC0
+
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
 enum {
index 542a6810c81cba704b37a28235678a685ecf47ab..6dfed1259260f06d23feb544d78ce6484c01fb36 100644 (file)
 /**
  * DOC: Transport layer - what is it ?
  *
- * The tranport layer is the layer that deals with the HW directly. It provides
+ * The transport layer is the layer that deals with the HW directly. It provides
  * an abstraction of the underlying HW to the upper layer. The transport layer
  * doesn't provide any policy, algorithm or anything of this kind, but only
- * mechanisms to make the HW do something.It is not completely stateless but
+ * mechanisms to make the HW do something. It is not completely stateless but
  * close to it.
  * We will have an implementation for each different supported bus.
  */
 /**
  * DOC: Host command section
  *
- * A host command is a commaned issued by the upper layer to the fw. There are
+ * A host command is a command issued by the upper layer to the fw. There are
  * several versions of fw that have several APIs. The transport layer is
  * completely agnostic to these differences.
- * The transport does provide helper functionnality (i.e. SYNC / ASYNC mode),
+ * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
  */
 #define SEQ_TO_QUEUE(s)        (((s) >> 8) & 0x1f)
 #define QUEUE_TO_SEQ(q)        (((q) & 0x1f) << 8)
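
A hedged example of the SYNC flavor described above; the wrapper and its cmd_id parameter are placeholders, real command ids live in the op_mode's fw API headers:

static int send_simple_sync_cmd(struct iwl_trans *trans, u8 cmd_id)
{
        struct iwl_host_cmd cmd = {
                .id = cmd_id,
                /* no CMD_ASYNC in .flags, so this call blocks until the
                 * firmware answers or the command times out */
        };

        return iwl_trans_send_cmd(trans, &cmd);
}
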
@@ -195,7 +195,7 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
  *     the response. The caller needs to call iwl_free_resp when done.
  * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
- *     command queue, but after other high priority commands. valid only
+ *     command queue, but after other high priority commands. Valid only
  *     with CMD_ASYNC.
  * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
@@ -458,6 +458,8 @@ struct iwl_trans_txq_scd_cfg {
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *     Must be atomic
  * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
+ * @freeze_txq_timer: prevents the timer of the queue from firing until the
+ *     queue is set to awake. Must be atomic.
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
  *     automatically deleted.
  * @write8: write a u8 to a register at offset ofs from the BAR
@@ -517,6 +519,8 @@ struct iwl_trans_ops {
 
        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
        int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
+       void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
+                                bool freeze);
 
        void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
        void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -578,7 +582,7 @@ enum iwl_d0i3_mode {
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
- * @hw_id: a u32 with the ID of the device / subdevice.
+ * @hw_id: a u32 with the ID of the device / sub-device.
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
@@ -873,6 +877,17 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
        iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
 }
 
+static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+                                             unsigned long txqs,
+                                             bool freeze)
+{
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+
+       if (trans->ops->freeze_txq_timer)
+               trans->ops->freeze_txq_timer(trans, txqs, freeze);
+}
+
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
                                                u32 txqs)
 {
index 877f19bbae7ea9c4d4caa29583600ff4111c45cd..13a0a03158deb0d1d884b390dfb2e7e3751b5ee2 100644 (file)
 #include "mvm.h"
 #include "iwl-debug.h"
 
-const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
-       [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
-       [BT_KILL_MSK_NEVER] = 0xffffffff,
-       [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-       cpu_to_le32(0xf0f0f0f0), /* 50% */
-       cpu_to_le32(0xc0c0c0c0), /* 25% */
-       cpu_to_le32(0xfcfcfcfc), /* 75% */
-       cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               /* Tight */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0x00004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Loose */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Tx Tx disabled */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xeeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
 /* 20MHz / 40MHz below / 40MHz above */
 static const __le64 iwl_ci_mask[][3] = {
        /* dummy entry for channel 0 */
@@ -596,14 +444,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                goto send_cmd;
        }
 
-       bt_cmd->max_kill = cpu_to_le32(5);
-       bt_cmd->bt4_antenna_isolation_thr =
-               cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
-       bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
-       bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
-       bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
-       bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
-
        mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
        bt_cmd->mode = cpu_to_le32(mode);
 
@@ -622,18 +462,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 
        bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
 
-       if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
-                      sizeof(iwl_single_shared_ant));
-       else
-               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
-                      sizeof(iwl_combined_lookup));
-
-       memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
-              sizeof(iwl_bt_prio_boost));
-       bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
-       bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
 send_cmd:
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -644,48 +472,6 @@ send_cmd:
        return ret;
 }
 
-static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
-       u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
-       u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut);
-       u32 ag = le32_to_cpu(notif->bt_activity_grading);
-       struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
-       u8 ack_kill_msk[NUM_PHY_CTX] = {};
-       u8 cts_kill_msk[NUM_PHY_CTX] = {};
-       int i;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut];
-       cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut];
-
-       ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut];
-       cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut];
-
-       /* Don't send HCMD if there is no update */
-       if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) ||
-           !memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk)))
-               return 0;
-
-       memcpy(mvm->bt_ack_kill_msk, ack_kill_msk,
-              sizeof(mvm->bt_ack_kill_msk));
-       memcpy(mvm->bt_cts_kill_msk, cts_kill_msk,
-              sizeof(mvm->bt_cts_kill_msk));
-
-       BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values));
-
-       for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) {
-               cmd.boost_values[i].kill_ack_msk =
-                       cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]);
-               cmd.boost_values[i].kill_cts_msk =
-                       cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]);
-       }
-
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
-                                   sizeof(cmd), &cmd);
-}
-
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
                                       bool enable)
 {
@@ -951,9 +737,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                        IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
                memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
        }
-
-       if (iwl_mvm_bt_udpate_sw_boost(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
@@ -1074,9 +857,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_bt_rssi_iterator, &data);
-
-       if (iwl_mvm_bt_udpate_sw_boost(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
index 5535ec9766cb6a5321bff82b587a4c61fe87f557..d954591e0be58528d138f8738b2cb2325db1fed3 100644 (file)
@@ -288,6 +288,65 @@ static const __le64 iwl_ci_mask[][3] = {
        },
 };
 
+enum iwl_bt_kill_msk {
+       BT_KILL_MSK_DEFAULT,
+       BT_KILL_MSK_NEVER,
+       BT_KILL_MSK_ALWAYS,
+       BT_KILL_MSK_MAX,
+};
+
+static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
+       [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
+       [BT_KILL_MSK_NEVER] = 0xffffffff,
+       [BT_KILL_MSK_ALWAYS] = 0,
+};
+
+static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+       },
+       {
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+       },
+       {
+               BT_KILL_MSK_DEFAULT,
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_DEFAULT,
+       },
+};
+
+static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_DEFAULT,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_DEFAULT,
+       },
+};
+
 struct corunning_block_luts {
        u8 range;
        __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -633,7 +692,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
        if (IWL_MVM_BT_COEX_TTC)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
 
-       if (IWL_MVM_BT_COEX_RRC)
+       if (iwl_mvm_bt_is_rrc_supported(mvm))
                bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
 
        if (mvm->cfg->bt_shared_single_ant)
index 9bdfa95d6ce7325f54b2aefde320879297783a19..a6c48c7b1e1683fdbdcb99fd0e4f971cf6ad66d2 100644 (file)
@@ -694,6 +694,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (ret)
                IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
 
+       if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+               IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
        return 0;
 }
 
@@ -1128,6 +1131,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        iwl_trans_d3_suspend(mvm->trans, test);
  out:
        if (ret < 0) {
+               iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
                ieee80211_restart_hw(mvm->hw);
                iwl_mvm_free_nd(mvm);
        }
@@ -1596,7 +1600,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        /* RF-kill already asserted again... */
        if (!cmd.resp_pkt) {
-               ret = -ERFKILL;
+               fw_status = ERR_PTR(-ERFKILL);
                goto out_free_resp;
        }
 
@@ -1605,7 +1609,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
        if (len < status_size) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               ret = -EIO;
+               fw_status = ERR_PTR(-EIO);
                goto out_free_resp;
        }
 
@@ -1613,7 +1617,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (len != (status_size +
                    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               ret = -EIO;
+               fw_status = ERR_PTR(-EIO);
                goto out_free_resp;
        }
 
@@ -1621,7 +1625,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 out_free_resp:
        iwl_free_resp(&cmd);
-       return ret ? ERR_PTR(ret) : fw_status;
+       return fw_status;
 }
 
 /* releases the MVM mutex */
@@ -1722,6 +1726,10 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
        results->matched_profiles = le32_to_cpu(query->matched_profiles);
        memcpy(results->matches, query->matches, sizeof(results->matches));
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
+#endif
+
 out_free_resp:
        iwl_free_resp(&cmd);
        return ret;
@@ -1874,6 +1882,12 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* query SRAM first in case we want event logging */
        iwl_mvm_read_d3_sram(mvm);
 
+       /*
+        * Query the current location and source from the D3 firmware so we
+        * can play it back when we re-initialize the D0 firmware
+        */
+       iwl_mvm_update_changed_regdom(mvm);
+
        if (mvm->net_detect) {
                iwl_mvm_query_netdetect_reasons(mvm, vif);
                /* has unlocked the mutex, so skip that */
@@ -1883,9 +1897,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 #ifdef CONFIG_IWLWIFI_DEBUGFS
                if (keep)
                        mvm->keep_vif = vif;
+#endif
                /* has unlocked the mutex, so skip that */
                goto out_iterate;
-#endif
        }
 
  out_unlock:
@@ -2007,6 +2021,7 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
        iwl_abort_notification_waits(&mvm->notif_wait);
+       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        ieee80211_restart_hw(mvm->hw);
 
        /* wait for restart and disconnect all interfaces */
index 8cbe77dc1dbb991b6cad15ed10ae828590682a20..9ac04c1ea7063d985980a5237fe705c1bba7186c 100644 (file)
@@ -562,11 +562,12 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                               "\tSecondary Channel Bitmap 0x%016llx\n",
                               le64_to_cpu(cmd->bt_secondary_ci));
 
-               pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
-               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
+               pos += scnprintf(buf+pos, bufsz-pos,
+                                "BT Configuration CMD - 0=default, 1=never, 2=always\n");
+               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
+                                mvm->bt_ack_kill_msk[0]);
+               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
+                                mvm->bt_cts_kill_msk[0]);
 
        } else {
                struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
@@ -579,21 +580,6 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                pos += scnprintf(buf+pos, bufsz-pos,
                               "\tSecondary Channel Bitmap 0x%016llx\n",
                               le64_to_cpu(cmd->bt_secondary_ci));
-
-               pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tPrimary: ACK Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tPrimary: CTS Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tSecondary: ACK Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]);
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tSecondary: CTS Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]);
-
        }
 
        mutex_unlock(&mvm->mutex);
@@ -1487,26 +1473,6 @@ out:
        return count;
 }
 
-static ssize_t iwl_dbgfs_enable_scan_iteration_notif_write(struct iwl_mvm *mvm,
-                                                          char *buf,
-                                                          size_t count,
-                                                          loff_t *ppos)
-{
-       int val;
-
-       mutex_lock(&mvm->mutex);
-
-       if (kstrtoint(buf, 10, &val)) {
-               mutex_unlock(&mvm->mutex);
-               return -EINVAL;
-       }
-
-       mvm->scan_iter_notif_enabled = val;
-       mutex_unlock(&mvm->mutex);
-
-       return count;
-}
-
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
 
 /* Device wide debugfs entries */
@@ -1529,7 +1495,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);
-MVM_DEBUGFS_WRITE_FILE_OPS(enable_scan_iteration_notif, 8);
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1573,8 +1538,11 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
-       MVM_DEBUGFS_ADD_FILE(enable_scan_iteration_notif, mvm->debugfs_dir,
-                            S_IWUSR);
+       if (!debugfs_create_bool("enable_scan_iteration_notif",
+                                S_IRUSR | S_IWUSR,
+                                mvm->debugfs_dir,
+                                &mvm->scan_iter_notif_enabled))
+               goto err;
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
@@ -1601,6 +1569,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
                                 mvm->debugfs_dir, &mvm->d3_wake_sysassert))
                goto err;
+       if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR,
+                               mvm->debugfs_dir, &mvm->last_netdetect_scans))
+               goto err;
        MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 #endif
 
index f3b11897991eeee2e1d88329d075e52a25a106e3..d398a6102805e9c77d1ee921e80c4572b45d2111 100644 (file)
@@ -235,36 +235,12 @@ enum iwl_bt_coex_enabled_modules {
  * struct iwl_bt_coex_cmd - bt coex configuration command
  * @mode: enum %iwl_bt_coex_mode
  * @enabled_modules: enum %iwl_bt_coex_enabled_modules
- * @max_kill: max count of Tx retries due to kill from PTA
- * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- *     should be set by default
- * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- *     should be set by default
- * @bt4_antenna_isolation_thr: antenna threshold value
- * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
- * @bt4_tx_rx_max_freq0: TxRx max frequency
- * @multiprio_lut: multi priority LUT configuration
- * @mplut_prio_boost: BT priority boost registers
- * @decision_lut: PTA decision LUT, per Prio-Ch
  *
  * The structure is used for the BT_COEX command.
  */
 struct iwl_bt_coex_cmd {
        __le32 mode;
        __le32 enabled_modules;
-
-       __le32 max_kill;
-       __le32 override_primary_lut;
-       __le32 override_secondary_lut;
-       __le32 bt4_antenna_isolation_thr;
-
-       __le32 bt4_tx_tx_delta_freq_thr;
-       __le32 bt4_tx_rx_max_freq0;
-
-       __le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
-       __le32 mplut_prio_boost[BT_COEX_BOOST_SIZE];
-
-       __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
 } __packed; /* BT_COEX_CMD_API_S_VER_6 */
 
 /**
@@ -279,29 +255,6 @@ struct iwl_bt_coex_corun_lut_update_cmd {
        __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE];
 } __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
 
-/**
- * struct iwl_bt_coex_sw_boost - SW boost values
- * @wifi_tx_prio_boost: SW boost of wifi tx priority
- * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
- * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
- */
-struct iwl_bt_coex_sw_boost {
-       __le32 wifi_tx_prio_boost;
-       __le32 wifi_rx_prio_boost;
-       __le32 kill_ack_msk;
-       __le32 kill_cts_msk;
-};
-
-/**
- * struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost
- * @boost_values: check struct  %iwl_bt_coex_sw_boost - one for each channel
- *     primary / secondary / low priority
- */
-struct iwl_bt_coex_sw_boost_update_cmd {
-       struct iwl_bt_coex_sw_boost boost_values[3];
-} __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */
-
 /**
  * struct iwl_bt_coex_reduced_txp_update_cmd
  * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
index 6d3bea5c59d1eb8ab42bca2f0c1a4523a23b8291..d7658d16e965c835144d8804d4e7185d605e8bfd 100644 (file)
@@ -132,7 +132,7 @@ struct iwl_proto_offload_cmd_common {
  * @solicited_node_ipv6_addr: broken -- solicited node address exists
  *     for each target address
  * @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @ndp_mac_addr: neighbor solicitation response MAC address
  */
 struct iwl_proto_offload_cmd_v1 {
        struct iwl_proto_offload_cmd_common common;
@@ -150,7 +150,7 @@ struct iwl_proto_offload_cmd_v1 {
  * @solicited_node_ipv6_addr: broken -- solicited node address exists
  *     for each target address
  * @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @ndp_mac_addr: neighbor solicitation response MAC address
  */
 struct iwl_proto_offload_cmd_v2 {
        struct iwl_proto_offload_cmd_common common;
index aabaedd3b3ee1c6e3deeadc1ec7a9a9fe6059b83..f3f3ee0a766bd21645e26fe4c671caa8e15e71c0 100644 (file)
@@ -255,7 +255,7 @@ struct iwl_mac_data_p2p_dev {
 /**
  * enum iwl_mac_filter_flags - MAC context filter flags
  * @MAC_FILTER_IN_PROMISC: accept all data frames
- * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all mangement and
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
  *     control frames to the host
  * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
  * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
index a5fbbd637070795b97fafde922aabb50d7bcaf4c..4f81dcf57a736e7409087f4b3193809c3570db6f 100644 (file)
@@ -103,7 +103,7 @@ struct iwl_ssid_ie {
  * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
  * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
  *     (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repeatition the driver
+ * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
  *     asked for
  * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
 */
@@ -187,11 +187,11 @@ enum scan_framework_client {
  * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
  * @scan_flags:                see enum iwl_scan_flags
  * @channel_count:     channels in channel list
- * @quiet_time:                dwell time, in milisiconds, on quiet channel
+ * @quiet_time:                dwell time, in milliseconds, on quiet channel
  * @quiet_plcp_th:     quiet channel num of packets threshold
  * @good_CRC_th:       passive to active promotion threshold
  * @rx_chain:          RXON rx chain.
- * @max_out_time:      max TUs to be out of assoceated channel
+ * @max_out_time:      max TUs to be out of associated channel
  * @suspend_time:      pause scan this TUs when returning to service channel
  * @flags:             RXON flags
  * @filter_flags:      RXONfilter
@@ -232,7 +232,7 @@ enum iwl_scan_offload_channel_flags {
  *     see enum iwl_scan_offload_channel_flags.
  * __le16 channel_number: channel number 1-13 etc.
  * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two innteration on one channel.
+ * __le32 iter_interval: interval between two iterations on one channel.
  * u8 active_dwell.
  * u8 passive_dwell.
  */
@@ -275,8 +275,8 @@ enum iwl_scan_offload_band_selection {
 /**
  * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
  * @ssid_index:                index to ssid list in fixed part
- * @unicast_cipher:    encryption olgorithm to match - bitmap
- * @aut_alg:           authentication olgorithm to match - bitmap
+ * @unicast_cipher:    encryption algorithm to match - bitmap
+ * @aut_alg:           authentication algorithm to match - bitmap
  * @network_type:      enum iwl_scan_offload_network_type
  * @band_selection:    enum iwl_scan_offload_band_selection
  * @client_bitmap:     clients waiting for match - enum scan_framework_client
@@ -748,7 +748,7 @@ enum iwl_umac_scan_general_flags {
  * @flags:             bitmap - 0-19:  directed scan to i'th ssid.
  * @channel_num:       channel number 1-13 etc.
  * @iter_count:                repetition count for the channel.
- * @iter_interval:     interval between two scan interations on one channel.
+ * @iter_interval:     interval between two scan iterations on one channel.
  */
 struct iwl_scan_channel_cfg_umac {
        __le32 flags;
index d95b472137318863e0cac5109d383b83182d5cbe..aab68cbae754d547a9e1fe514c4c88de6877777f 100644 (file)
@@ -212,6 +212,10 @@ enum {
        REPLY_RX_MPDU_CMD = 0xc1,
        BA_NOTIF = 0xc5,
 
+       /* Location Aware Regulatory */
+       MCC_UPDATE_CMD = 0xc8,
+       MCC_CHUB_UPDATE_CMD = 0xc9,
+
        MARKER_CMD = 0xcb,
 
        /* BT Coex */
@@ -362,7 +366,8 @@ enum {
        NVM_SECTION_TYPE_CALIBRATION = 4,
        NVM_SECTION_TYPE_PRODUCTION = 5,
        NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
-       NVM_MAX_NUM_SECTIONS = 12,
+       NVM_SECTION_TYPE_PHY_SKU = 12,
+       NVM_MAX_NUM_SECTIONS = 13,
 };
 
 /**
@@ -1442,7 +1447,19 @@ enum iwl_sf_scenario {
 #define SF_W_MARK_LEGACY 4096
 #define SF_W_MARK_SCAN 4096
 
-/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160   /* 150 uSec  */
+#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400  /* 0.4 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160              /* 150 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER_DEF 400             /* 0.4 mSec */
+#define SF_MCAST_IDLE_TIMER_DEF 160            /* 150 uSec */
+#define SF_MCAST_AGING_TIMER_DEF 400           /* 0.4 mSec */
+#define SF_BA_IDLE_TIMER_DEF 160                       /* 150 uSec */
+#define SF_BA_AGING_TIMER_DEF 400                      /* 0.4 mSec */
+#define SF_TX_RE_IDLE_TIMER_DEF 160                    /* 150 uSec */
+#define SF_TX_RE_AGING_TIMER_DEF 400           /* 0.4 mSec */
+
+/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
 #define SF_SINGLE_UNICAST_IDLE_TIMER 320       /* 300 uSec  */
 #define SF_SINGLE_UNICAST_AGING_TIMER 2016     /* 2 mSec */
 #define SF_AGG_UNICAST_IDLE_TIMER 320          /* 300 uSec */
@@ -1473,6 +1490,92 @@ struct iwl_sf_cfg_cmd {
        __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
 } __packed; /* SF_CFG_API_S_VER_2 */
 
+/***********************************
+ * Location Aware Regulatory (LAR) API - MCC updates
+ ***********************************/
+
+/**
+ * struct iwl_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ */
+struct iwl_mcc_update_cmd {
+       __le16 mcc;
+       u8 source_id;
+       u8 reserved;
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S */
+
+/**
+ * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
+ *             channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *     16bits are used.
+ */
+struct iwl_mcc_update_resp {
+       __le32 status;
+       __le16 mcc;
+       u8 cap;
+       u8 source_id;
+       __le32 n_channels;
+       __le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */
+
+/**
+ * struct iwl_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores, gets updates of the MCC, and
+ * notifies the ucode directly of any MCC change.
+ * The ucode then requests the driver to ask the device to update its
+ * geographic regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for world domain.
+ * The 'ZZ' MCC will be used to switch to the NVM default profile; in this case,
+ * the MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see iwl_mcc_source
+ * @reserved1: reserved for alignment
+ */
+struct iwl_mcc_chub_notif {
+       u16 mcc;
+       u8 source_id;
+       u8 reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwl_mcc_update_status {
+       MCC_RESP_NEW_CHAN_PROFILE,
+       MCC_RESP_SAME_CHAN_PROFILE,
+       MCC_RESP_INVALID,
+       MCC_RESP_NVM_DISABLED,
+       MCC_RESP_ILLEGAL,
+       MCC_RESP_LOW_PRIORITY,
+};
+
+enum iwl_mcc_source {
+       MCC_SOURCE_OLD_FW = 0,
+       MCC_SOURCE_ME = 1,
+       MCC_SOURCE_BIOS = 2,
+       MCC_SOURCE_3G_LTE_HOST = 3,
+       MCC_SOURCE_3G_LTE_DEVICE = 4,
+       MCC_SOURCE_WIFI = 5,
+       MCC_SOURCE_RESERVED = 6,
+       MCC_SOURCE_DEFAULT = 7,
+       MCC_SOURCE_UNINITIALIZED = 8,
+       MCC_SOURCE_GET_CURRENT = 0x10
+};
+
 /* DTS measurements */
 
 enum iwl_dts_measurement_flags {
index a81da4cde643a5e38659a3b7098a01bac8a49a29..bc5eac4960e18a79a211da2a2bf6492b1a39e570 100644 (file)
@@ -526,16 +526,33 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
-                               const char *str, size_t len)
+                               const char *fmt, ...)
 {
        unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
        u16 occurrences = le16_to_cpu(trigger->occurrences);
-       int ret;
+       int ret, len = 0;
+       char buf[64];
 
        if (!occurrences)
                return 0;
 
-       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), str,
+       if (fmt) {
+               va_list ap;
+
+               buf[sizeof(buf) - 1] = '\0';
+
+               va_start(ap, fmt);
+               vsnprintf(buf, sizeof(buf), fmt, ap);
+               va_end(ap);
+
+               /* check for truncation */
+               if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
+                       buf[sizeof(buf) - 1] = '\0';
+
+               len = strlen(buf) + 1;
+       }
+
+       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
                                     len, delay);
        if (ret)
                return ret;
@@ -739,6 +756,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       /*
+        * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
+        * anyway, so don't init MCC.
+        */
+       if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
+               ret = iwl_mvm_init_mcc(mvm);
+               if (ret)
+                       goto error;
+       }
+
        if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
                ret = iwl_mvm_config_scan(mvm);
                if (ret)
index 581b3b8f29f9b6d7460b98eeb9ee54e3b9612c35..8088c7137f7c9462417ffdfc07a30dcc37be6ec2 100644 (file)
@@ -470,9 +470,8 @@ exit_fail:
 
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
-       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
-                                       mvm->cfg->base_params->wd_timeout :
-                                       IWL_WATCHDOG_DISABLED;
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, vif, false, false);
        u32 ac;
        int ret;
 
@@ -1413,7 +1412,7 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
 
        if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
            rx_missed_bcon >= stop_trig_missed_bcon)
-               iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL, 0);
+               iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
 int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
index 7396b52262b584dc64ac9094f717266a3d1d9d97..84555170b6f751bb4f0925bf5c85319de76293b6 100644 (file)
@@ -86,6 +86,7 @@
 #include "iwl-fw-error-dump.h"
 #include "iwl-prph.h"
 #include "iwl-csr.h"
+#include "iwl-nvm-parse.h"
 
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
@@ -301,6 +302,116 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
        }
 }
 
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+                                                 const char *alpha2,
+                                                 enum iwl_mcc_source src_id,
+                                                 bool *changed)
+{
+       struct ieee80211_regdomain *regd = NULL;
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mcc_update_resp *resp;
+
+       IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
+       if (IS_ERR_OR_NULL(resp)) {
+               IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
+                             PTR_RET(resp));
+               goto out;
+       }
+
+       if (changed)
+               *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
+
+       regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+                                     __le32_to_cpu(resp->n_channels),
+                                     resp->channels,
+                                     __le16_to_cpu(resp->mcc));
+       /* Store the return source id */
+       src_id = resp->source_id;
+       kfree(resp);
+       if (IS_ERR_OR_NULL(regd)) {
+               IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
+                             PTR_RET(regd));
+               goto out;
+       }
+
+       IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+                     regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
+       mvm->lar_regdom_set = true;
+       mvm->mcc_src = src_id;
+
+out:
+       return regd;
+}
+
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
+{
+       bool changed;
+       struct ieee80211_regdomain *regd;
+
+       if (!iwl_mvm_is_lar_supported(mvm))
+               return;
+
+       regd = iwl_mvm_get_current_regdomain(mvm, &changed);
+       if (!IS_ERR_OR_NULL(regd)) {
+               /* only update the regulatory core if changed */
+               if (changed)
+                       regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+               kfree(regd);
+       }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+                                                         bool *changed)
+{
+       return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
+                                    iwl_mvm_is_wifi_mcc_supported(mvm) ?
+                                    MCC_SOURCE_GET_CURRENT :
+                                    MCC_SOURCE_OLD_FW, changed);
+}
+
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
+{
+       enum iwl_mcc_source used_src;
+       struct ieee80211_regdomain *regd;
+       int ret;
+       bool changed;
+       const struct ieee80211_regdomain *r =
+                       rtnl_dereference(mvm->hw->wiphy->regd);
+
+       if (!r)
+               return -ENOENT;
+
+       /* save the last source in case we overwrite it below */
+       used_src = mvm->mcc_src;
+       if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
+               /* Notify the firmware we support wifi location updates */
+               regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+               if (!IS_ERR_OR_NULL(regd))
+                       kfree(regd);
+       }
+
+       /* Now set our last stored MCC and source */
+       regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
+                                    &changed);
+       if (IS_ERR_OR_NULL(regd))
+               return -EIO;
+
+       /* update cfg80211 if the regdomain was changed */
+       if (changed)
+               ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+       else
+               ret = 0;
+
+       kfree(regd);
+       return ret;
+}
+
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
        struct ieee80211_hw *hw = mvm->hw;
@@ -356,8 +467,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                BIT(NL80211_IFTYPE_ADHOC);
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-       hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
-                                      REGULATORY_DISABLE_BEACON_HINTS;
+       hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+       if (iwl_mvm_is_lar_supported(mvm))
+               hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+       else
+               hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+                                              REGULATORY_DISABLE_BEACON_HINTS;
 
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
                hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -899,6 +1014,9 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
        mvm->fw_dump_desc = NULL;
 }
 
+#define IWL8260_ICCM_OFFSET            0x44000 /* Only for B-step */
+#define IWL8260_ICCM_LEN               0xC000 /* Only for B-step */
+
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 {
        struct iwl_fw_error_dump_file *dump_file;
@@ -914,16 +1032,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       /* W/A for 8000 HW family A-step */
-       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
-           CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) {
-               if (smem_len)
-                       smem_len = 0x38000;
-
-               if (sram2_len)
-                       sram2_len = 0x10000;
-       }
-
        fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
        if (!fw_error_dump)
                return;
@@ -975,6 +1083,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                   fifo_data_len +
                   sizeof(*dump_info);
 
+       /*
+        * In the 8000 HW family B-step, include the ICCM (which resides separately)
+        */
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+           CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+                           IWL8260_ICCM_LEN;
+
        if (mvm->fw_dump_desc)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
@@ -1062,6 +1178,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                                         dump_mem->data, sram2_len);
        }
 
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+           CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
+               dump_data = iwl_fw_error_next_data(dump_data);
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+               dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
+                                            sizeof(*dump_mem));
+               dump_mem = (void *)dump_data->data;
+               dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+               dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
+               iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
+                                        dump_mem->data, IWL8260_ICCM_LEN);
+       }
+
        fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
        fw_error_dump->op_mode_len = file_len;
        if (fw_error_dump->trans_ptr)
@@ -1193,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        iwl_mvm_d0i3_enable_tx(mvm, NULL);
-       ret = iwl_mvm_update_quotas(mvm, NULL);
+       ret = iwl_mvm_update_quotas(mvm, false, NULL);
        if (ret)
                IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
                        ret);
@@ -1291,6 +1420,20 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
         */
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
+       /* We shouldn't have any UIDs still set.  Loop over all the UIDs to
+        * make sure there's nothing left there and warn if any is found.
+        */
+       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+               int i;
+
+               for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
+                       if (WARN_ONCE(mvm->scan_uid[i],
+                                     "UMAC scan UID %d was not cleaned\n",
+                                     mvm->scan_uid[i]))
+                               mvm->scan_uid[i] = 0;
+               }
+       }
+
        mvm->ucode_loaded = false;
 }
 
@@ -1487,9 +1630,33 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
        u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
 
        if (tfd_msk) {
+               /*
+                * mac80211 first removes all the stations of the vif and
+                * then removes the vif. When it removes a station it also
+                * flushes the AMPDU session. So by now, all the AMPDU sessions
+                * of all the stations of this vif are closed, and the queues
+                * of these AMPDU sessions are properly closed.
+                * We still need to take care of the shared queues of the vif.
+                * Flush them here.
+                */
                mutex_lock(&mvm->mutex);
                iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
                mutex_unlock(&mvm->mutex);
+
+               /*
+                * There are transports that buffer a few frames in the host.
+                * For these, the flush above isn't enough since while we were
+                * flushing, the transport might have sent more frames to the
+                * device. To solve this, wait here until the transport is
+                * empty. Technically, this could have replaced the flush
+                * above, but flush is much faster than draining. So flush
+                * first, and drain to make sure we have no frames in the
+                * transport anymore.
+                * If a station still had frames on the shared queues, it is
+                * already marked as draining, so to complete the draining, we
+                * just need to wait until the transport is empty.
+                */
+               iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
        }
 
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
@@ -1872,7 +2039,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                               sizeof(mvmvif->beacon_stats));
 
                        /* add quota for this interface */
-                       ret = iwl_mvm_update_quotas(mvm, NULL);
+                       ret = iwl_mvm_update_quotas(mvm, true, NULL);
                        if (ret) {
                                IWL_ERR(mvm, "failed to update quotas\n");
                                return;
@@ -1924,7 +2091,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
                        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
                        /* remove quota for this interface */
-                       ret = iwl_mvm_update_quotas(mvm, NULL);
+                       ret = iwl_mvm_update_quotas(mvm, false, NULL);
                        if (ret)
                                IWL_ERR(mvm, "failed to update quotas\n");
 
@@ -2043,7 +2210,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        /* power updated needs to be done before quotas */
        iwl_mvm_power_update_mac(mvm);
 
-       ret = iwl_mvm_update_quotas(mvm, NULL);
+       ret = iwl_mvm_update_quotas(mvm, false, NULL);
        if (ret)
                goto out_quota_failed;
 
@@ -2059,8 +2226,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        if (iwl_mvm_phy_ctx_count(mvm) > 1)
                iwl_mvm_teardown_tdls_peers(mvm);
 
-       mutex_unlock(&mvm->mutex);
-       return 0;
+       goto out_unlock;
 
 out_quota_failed:
        iwl_mvm_power_update_mac(mvm);
@@ -2109,7 +2275,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
        if (vif->p2p && mvm->p2p_device_vif)
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
 
-       iwl_mvm_update_quotas(mvm, NULL);
+       iwl_mvm_update_quotas(mvm, false, NULL);
        iwl_mvm_send_rm_bcast_sta(mvm, vif);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
@@ -2248,6 +2414,12 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+               IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
        if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
                ret = -EBUSY;
                goto out;
@@ -2328,25 +2500,35 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       unsigned long txqs = 0, tids = 0;
        int tid;
 
+       spin_lock_bh(&mvmsta->lock);
+       for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+               if (tid_data->state != IWL_AGG_ON &&
+                   tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+                       continue;
+
+               __set_bit(tid_data->txq_id, &txqs);
+
+               if (iwl_mvm_tid_queued(tid_data) == 0)
+                       continue;
+
+               __set_bit(tid, &tids);
+       }
+
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
                        ieee80211_sta_block_awake(hw, sta, true);
-               spin_lock_bh(&mvmsta->lock);
-               for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
-                       struct iwl_mvm_tid_data *tid_data;
 
-                       tid_data = &mvmsta->tid_data[tid];
-                       if (tid_data->state != IWL_AGG_ON &&
-                           tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
-                               continue;
-                       if (iwl_mvm_tid_queued(tid_data) == 0)
-                               continue;
+               for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
                        ieee80211_sta_set_buffered(sta, tid, true);
-               }
-               spin_unlock_bh(&mvmsta->lock);
+
+               if (txqs)
+                       iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
                /*
                 * The fw updates the STA to be asleep. Tx packets on the Tx
                 * queues to this station will not be transmitted. The fw will
@@ -2356,11 +2538,15 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
        case STA_NOTIFY_AWAKE:
                if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                        break;
+
+               if (txqs)
+                       iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
                iwl_mvm_sta_modify_ps_wake(mvm, sta);
                break;
        default:
                break;
        }
+       spin_unlock_bh(&mvmsta->lock);
 }
 
 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
@@ -2598,6 +2784,12 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+               IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
        if (!vif->bss_conf.idle) {
                ret = -EBUSY;
                goto out;
@@ -2888,6 +3080,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
                           duration, type);
 
+       flush_work(&mvm->roc_done_wk);
+
        mutex_lock(&mvm->mutex);
 
        switch (vif->type) {
@@ -3159,14 +3353,14 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
         */
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                mvmvif->monitor_active = true;
-               ret = iwl_mvm_update_quotas(mvm, NULL);
+               ret = iwl_mvm_update_quotas(mvm, false, NULL);
                if (ret)
                        goto out_remove_binding;
        }
 
        /* Handle binding during CSA */
        if (vif->type == NL80211_IFTYPE_AP) {
-               iwl_mvm_update_quotas(mvm, NULL);
+               iwl_mvm_update_quotas(mvm, false, NULL);
                iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
        }
 
@@ -3190,7 +3384,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
 
                iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
 
-               iwl_mvm_update_quotas(mvm, NULL);
+               iwl_mvm_update_quotas(mvm, false, NULL);
        }
 
        goto out;
@@ -3263,7 +3457,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
                break;
        }
 
-       iwl_mvm_update_quotas(mvm, disabled_vif);
+       iwl_mvm_update_quotas(mvm, false, disabled_vif);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
 out:
@@ -3455,7 +3649,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
                mvm->noa_duration = noa_duration;
                mvm->noa_vif = vif;
 
-               return iwl_mvm_update_quotas(mvm, NULL);
+               return iwl_mvm_update_quotas(mvm, false, NULL);
        case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
                /* must be associated client vif - ignore authorized */
                if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@@ -3512,11 +3706,12 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       mvmvif->csa_failed = false;
+
        IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
                           chsw->chandef.center_freq1);
 
-       iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH,
-                                      NULL, 0);
+       iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
 
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
@@ -3587,6 +3782,12 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (mvmvif->csa_failed) {
+               mvmvif->csa_failed = false;
+               ret = -EIO;
+               goto out_unlock;
+       }
+
        if (vif->type == NL80211_IFTYPE_STATION) {
                struct iwl_mvm_sta *mvmsta;
 
@@ -3710,6 +3911,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
                            mvm->radio_stats.on_time_scan;
        do_div(survey->time_scan, USEC_PER_MSEC);
 
+       ret = 0;
  out:
        mutex_unlock(&mvm->mutex);
        return ret;
@@ -3755,6 +3957,64 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      const struct ieee80211_event *event)
+{
+#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)   \
+       do {                                                    \
+               if ((_cnt) && --(_cnt))                         \
+                       break;                                  \
+               iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
+       } while (0)
+
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+               return;
+
+       if (event->u.mlme.status == MLME_SUCCESS)
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+       trig_mlme = (void *)trig->data;
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               return;
+
+       if (event->u.mlme.data == ASSOC_EVENT) {
+               if (event->u.mlme.status == MLME_DENIED)
+                       CHECK_MLME_TRIGGER(mvm, trig, buf,
+                                          trig_mlme->stop_assoc_denied,
+                                          "DENIED ASSOC: reason %d",
+                                           event->u.mlme.reason);
+               else if (event->u.mlme.status == MLME_TIMEOUT)
+                       CHECK_MLME_TRIGGER(mvm, trig, buf,
+                                          trig_mlme->stop_assoc_timeout,
+                                          "ASSOC TIMEOUT");
+       } else if (event->u.mlme.data == AUTH_EVENT) {
+               if (event->u.mlme.status == MLME_DENIED)
+                       CHECK_MLME_TRIGGER(mvm, trig, buf,
+                                          trig_mlme->stop_auth_denied,
+                                          "DENIED AUTH: reason %d",
+                                          event->u.mlme.reason);
+               else if (event->u.mlme.status == MLME_TIMEOUT)
+                       CHECK_MLME_TRIGGER(mvm, trig, buf,
+                                          trig_mlme->stop_auth_timeout,
+                                          "AUTH TIMEOUT");
+       } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
+               CHECK_MLME_TRIGGER(mvm, trig, buf,
+                                  trig_mlme->stop_rx_deauth,
+                                  "DEAUTH RX %d", event->u.mlme.reason);
+       } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
+               CHECK_MLME_TRIGGER(mvm, trig, buf,
+                                  trig_mlme->stop_tx_deauth,
+                                  "DEAUTH TX %d", event->u.mlme.reason);
+       }
+#undef CHECK_MLME_TRIGGER
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -3808,6 +4068,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
        .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
 
+       .event_callback = iwl_mvm_mac_event_callback,
+
        CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
 
 #ifdef CONFIG_PM_SLEEP
index 95cad68ab069a2d4eb49da2a5575cb79a0485af0..d5522a16124292cd6cab36159851a8446b5e2d95 100644 (file)
@@ -349,11 +349,12 @@ struct iwl_mvm_vif_bf_data {
  * @bcast_sta: station used for broadcast packets. Used by the following
  *  vifs: P2P_DEVICE, GO and AP.
  * @beacon_skb: the skb used to hold the AP/GO beacon template
- * @smps_requests: the SMPS requests of differents parts of the driver,
+ * @smps_requests: the SMPS requests of different parts of the driver,
  *     combined on update to yield the overall request to mac80211.
  * @beacon_stats: beacon statistics, containing the # of received beacons,
  *     # of received beacons accumulated over FW restart, and the current
  *     average signal of beacons retrieved from the firmware
+ * @csa_failed: CSA failed to schedule time event, report an error later
  */
 struct iwl_mvm_vif {
        struct iwl_mvm *mvm;
@@ -433,6 +434,7 @@ struct iwl_mvm_vif {
 
        /* Indicates that CSA countdown may be started */
        bool csa_countdown;
+       bool csa_failed;
 };
 
 static inline struct iwl_mvm_vif *
@@ -686,7 +688,7 @@ struct iwl_mvm {
        bool disable_power_off;
        bool disable_power_off_d3;
 
-       bool scan_iter_notif_enabled;
+       u32 scan_iter_notif_enabled; /* must be u32 for debugfs_create_bool */
 
        struct debugfs_blob_wrapper nvm_hw_blob;
        struct debugfs_blob_wrapper nvm_sw_blob;
@@ -746,6 +748,7 @@ struct iwl_mvm {
        void *d3_resume_sram;
        u32 d3_test_pme_ptr;
        struct ieee80211_vif *keep_vif;
+       u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
 #endif
 #endif
 
@@ -810,6 +813,9 @@ struct iwl_mvm {
        /* system time of last beacon (for AP/GO interface) */
        u32 ap_last_beacon_gp2;
 
+       bool lar_regdom_set;
+       enum iwl_mcc_source mcc_src;
+
        u8 low_latency_agg_frame_limit;
 
        /* TDLS channel switch data */
@@ -910,6 +916,31 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
               (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
+static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
+{
+       bool nvm_lar = mvm->nvm_data->lar_enabled;
+       bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
+               IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+
+       if (iwlwifi_mod_params.lar_disable)
+               return false;
+
+       /*
+        * Enable LAR only if it is supported by the FW (TLV) &&
+        * enabled in the NVM
+        */
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               return nvm_lar && tlv_lar;
+       else
+               return tlv_lar;
+}
+
+static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
+{
+       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
+              mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+}
+
 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 {
        return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
@@ -921,6 +952,12 @@ static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
                IWL_MVM_BT_COEX_CORUNNING;
 }
 
+static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
+{
+       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+               IWL_MVM_BT_COEX_RRC;
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -1106,13 +1143,14 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 
 /* Quota management */
-int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
                          struct ieee80211_vif *disabled_vif);
 
 /* Scanning */
 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
 int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
 int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -1282,17 +1320,6 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
                                      struct iwl_rx_cmd_buffer *rxb,
                                      struct iwl_device_cmd *cmd);
 
-enum iwl_bt_kill_msk {
-       BT_KILL_MSK_DEFAULT,
-       BT_KILL_MSK_NEVER,
-       BT_KILL_MSK_ALWAYS,
-       BT_KILL_MSK_MAX,
-};
-
-extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
-extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
-extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX];
-
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void
@@ -1389,6 +1416,23 @@ void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 int iwl_mvm_get_temp(struct iwl_mvm *mvm);
 
+/* Location Aware Regulatory */
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+                  enum iwl_mcc_source src_id);
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
+int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                              struct iwl_rx_cmd_buffer *rxb,
+                              struct iwl_device_cmd *cmd);
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+                                                 const char *alpha2,
+                                                 enum iwl_mcc_source src_id,
+                                                 bool *changed);
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+                                                         bool *changed);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
+
 /* smart fifo */
 int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                      bool added_vif);
@@ -1436,8 +1480,12 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
-                               const char *str, size_t len);
-
+                               const char *fmt, ...) __printf(3, 4);
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   bool tdls, bool cmd_q);
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            const char *errmsg);
 static inline bool
 iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
                             struct ieee80211_vif *vif)
@@ -1470,8 +1518,7 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
 static inline void
 iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
-                              enum iwl_fw_dbg_trigger trig,
-                              const char *str, size_t len)
+                              enum iwl_fw_dbg_trigger trig)
 {
        struct iwl_fw_dbg_trigger_tlv *trigger;
 
@@ -1482,7 +1529,7 @@ iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
        if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
                return;
 
-       iwl_mvm_fw_dbg_collect_trig(mvm, trigger, str, len);
+       iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
 #endif /* __IWL_MVM_H__ */
index 5383429d96c1c49f91539c8440b6016cee9dd31c..87b2a30a2308439c4e7a3f3bd80e419f9583af5a 100644 (file)
  *
  *****************************************************************************/
 #include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
 #include "iwl-trans.h"
 #include "iwl-csr.h"
 #include "mvm.h"
 #include "iwl-eeprom-parse.h"
 #include "iwl-eeprom-read.h"
 #include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
 
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
 #define IWL_MAX_NVM_SECTION_SIZE       0x1b58
-#define IWL_MAX_NVM_8000A_SECTION_SIZE 0xffc
-#define IWL_MAX_NVM_8000B_SECTION_SIZE 0x1ffc
+#define IWL_MAX_NVM_8000_SECTION_SIZE  0x1ffc
 
 #define NVM_WRITE_OPCODE 1
 #define NVM_READ_OPCODE 0
@@ -262,7 +265,9 @@ static struct iwl_nvm_data *
 iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 {
        struct iwl_nvm_section *sections = mvm->nvm_sections;
-       const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
+       const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
+       bool lar_enabled;
+       u32 mac_addr0, mac_addr1;
 
        /* Checking for required sections */
        if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -286,22 +291,38 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                                "Can't parse mac_address, empty sections\n");
                        return NULL;
                }
+
+               /* PHY_SKU section is mandatory in B0 */
+               if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+                       IWL_ERR(mvm,
+                               "Can't parse phy_sku in B0, empty sections\n");
+                       return NULL;
+               }
        }
 
        if (WARN_ON(!mvm->cfg))
                return NULL;
 
+       /* read the mac address from WFMP registers */
+       mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
+       mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
+
        hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
        regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
        mac_override =
                (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+       phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+
+       lar_enabled = !iwlwifi_mod_params.lar_disable &&
+                     (mvm->fw->ucode_capa.capa[0] &
+                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
        return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
-                                 regulatory, mac_override,
-                                 mvm->fw->valid_tx_ant,
-                                 mvm->fw->valid_rx_ant);
+                                 regulatory, mac_override, phy_sku,
+                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
+                                 lar_enabled, mac_addr0, mac_addr1);
 }
 
 #define MAX_NVM_FILE_LEN       16384
@@ -354,10 +375,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
        /* Maximal size depends on HW family and step */
        if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
                max_section_size = IWL_MAX_NVM_SECTION_SIZE;
-       else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
-               max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
-       else /* Family 8000 B-step or C-step */
-               max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;
+       else
+               max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE;
 
        /*
         * Obtain NVM image via request_firmware. Since we already used
@@ -399,6 +418,15 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
                IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
                         le32_to_cpu(dword_buff[3]));
+
+               /* nvm file validation, dword_buff[2] holds the file version */
+               if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
+                    le32_to_cpu(dword_buff[2]) < 0xE4A) ||
+                   (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP &&
+                    le32_to_cpu(dword_buff[2]) >= 0xE4A)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        } else {
                file_sec = (void *)fw_entry->data;
        }
@@ -497,6 +525,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
        int ret, section;
        u32 size_read = 0;
        u8 *nvm_buffer, *temp;
+       const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step;
+       const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
 
        if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
                return -EINVAL;
@@ -555,10 +585,27 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
 
        /* load external NVM if configured */
        if (mvm->nvm_file_name) {
-               /* move to External NVM flow */
+               /* read External NVM file - take the default */
                ret = iwl_mvm_read_external_nvm(mvm);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       /* choose the nvm_file name according to the
+                        * HW step
+                        */
+                       if (CSR_HW_REV_STEP(mvm->trans->hw_rev) ==
+                           SILICON_B_STEP)
+                               mvm->nvm_file_name = nvm_file_B;
+                       else
+                               mvm->nvm_file_name = nvm_file_C;
+
+                       if (ret == -EFAULT && mvm->nvm_file_name) {
+                               /* the NVM file read failed - try again with the default file */
+                               ret = iwl_mvm_read_external_nvm(mvm);
+                               if (ret)
+                                       return ret;
+                       } else {
+                               return ret;
+                       }
+               }
        }
 
        /* parse the relevant nvm sections */
@@ -570,3 +617,257 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
 
        return 0;
 }
+
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+                  enum iwl_mcc_source src_id)
+{
+       struct iwl_mcc_update_cmd mcc_update_cmd = {
+               .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+               .source_id = (u8)src_id,
+       };
+       struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = MCC_UPDATE_CMD,
+               .flags = CMD_WANT_SKB,
+               .data = { &mcc_update_cmd },
+       };
+
+       int ret;
+       u32 status;
+       int resp_len, n_channels;
+       u16 mcc;
+
+       if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+
+       IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
+                     alpha2[0], alpha2[1], src_id);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret)
+               return ERR_PTR(ret);
+
+       pkt = cmd.resp_pkt;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
+                       pkt->hdr.flags);
+               ret = -EIO;
+               goto exit;
+       }
+
+       /* Extract MCC response */
+       mcc_resp = (void *)pkt->data;
+       status = le32_to_cpu(mcc_resp->status);
+
+       mcc = le16_to_cpu(mcc_resp->mcc);
+
+       /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+       if (mcc == 0) {
+               mcc = 0x3030;  /* "00" - world */
+               mcc_resp->mcc = cpu_to_le16(mcc);
+       }
+
+       n_channels =  __le32_to_cpu(mcc_resp->n_channels);
+       IWL_DEBUG_LAR(mvm,
+                     "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
+                     status, mcc, mcc >> 8, mcc & 0xff,
+                     !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
+
+       resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
+       resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+       if (!resp_cp) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       ret = 0;
+exit:
+       iwl_free_resp(&cmd);
+       if (ret)
+               return ERR_PTR(ret);
+       return resp_cp;
+}
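
For reference, a minimal sketch of the alpha2 <-> MCC packing used by this command
(hypothetical helper names, not taken from the driver; grounded in the cpu_to_le16()
packing and the 0x3030 "00" workaround above):

        /* "US" -> ('U' << 8) | 'S' = 0x5553; the W/A above maps 0 back to 0x3030 ("00", world) */
        static inline u16 alpha2_to_mcc(const char *alpha2)
        {
                return (alpha2[0] << 8) | alpha2[1];
        }

        static inline void mcc_to_alpha2(u16 mcc, char *alpha2)
        {
                alpha2[0] = (mcc >> 8) & 0xff;
                alpha2[1] = mcc & 0xff;
                alpha2[2] = '\0';       /* callers use a 3-byte buffer, as in the code above */
        }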
+
+#ifdef CONFIG_ACPI
+#define WRD_METHOD             "WRDD"
+#define WRDD_WIFI              (0x07)
+#define WRDD_WIGIG             (0x10)
+
+static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
+{
+       union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+       u32 i;
+
+       if (wrdd->type != ACPI_TYPE_PACKAGE ||
+           wrdd->package.count < 2 ||
+           wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+           wrdd->package.elements[0].integer.value != 0) {
+               IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
+               return 0;
+       }
+
+       for (i = 1 ; i < wrdd->package.count ; ++i) {
+               mcc_pkg = &wrdd->package.elements[i];
+
+               if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+                   mcc_pkg->package.count < 2 ||
+                   mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+                   mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+                       mcc_pkg = NULL;
+                       continue;
+               }
+
+               domain_type = &mcc_pkg->package.elements[0];
+               if (domain_type->integer.value == WRDD_WIFI)
+                       break;
+
+               mcc_pkg = NULL;
+       }
+
+       if (mcc_pkg) {
+               mcc_value = &mcc_pkg->package.elements[1];
+               return mcc_value->integer.value;
+       }
+
+       return 0;
+}
+
+static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
+{
+       acpi_handle root_handle;
+       acpi_handle handle;
+       struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+       u32 mcc_val;
+       struct pci_dev *pdev = to_pci_dev(mvm->dev);
+
+       root_handle = ACPI_HANDLE(&pdev->dev);
+       if (!root_handle) {
+               IWL_DEBUG_LAR(mvm,
+                             "Could not retrieve root port ACPI handle\n");
+               return -ENOENT;
+       }
+
+       /* Get the method's handle */
+       status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_LAR(mvm, "WRD method not found\n");
+               return -ENOENT;
+       }
+
+       /* Call WRDD with no arguments */
+       status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_LAR(mvm, "WRDD invocation failed (0x%x)\n", status);
+               return -ENOENT;
+       }
+
+       mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
+       kfree(wrdd.pointer);
+       if (!mcc_val)
+               return -ENOENT;
+
+       mcc[0] = (mcc_val >> 8) & 0xff;
+       mcc[1] = mcc_val & 0xff;
+       mcc[2] = '\0';
+       return 0;
+}
+#else /* CONFIG_ACPI */
+static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
+{
+       return -ENOENT;
+}
+#endif
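
As inferred from the checks in iwl_mvm_wrdd_get_mcc() above (illustrative only; real
BIOS tables may contain additional entries), the WRDD object is expected to look
roughly like this:

        /*
         * WRDD = Package() {
         *         0,                                   // element 0: revision, must be integer 0
         *         Package() { WRDD_WIFI (0x07), mcc }, // {domain type, country code} pairs;
         *         ...                                  // the Wi-Fi entry is the one used here
         * }
         */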
+
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
+{
+       bool tlv_lar;
+       bool nvm_lar;
+       int retval;
+       struct ieee80211_regdomain *regd;
+       char mcc[3];
+
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+               tlv_lar = mvm->fw->ucode_capa.capa[0] &
+                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+               nvm_lar = mvm->nvm_data->lar_enabled;
+               if (tlv_lar != nvm_lar)
+                       IWL_INFO(mvm,
+                                "Conflict between TLV & NVM regarding enabling LAR (TLV = %s, NVM = %s)\n",
+                                tlv_lar ? "enabled" : "disabled",
+                                nvm_lar ? "enabled" : "disabled");
+       }
+
+       if (!iwl_mvm_is_lar_supported(mvm))
+               return 0;
+
+       /*
+        * try to replay the last set MCC to FW. If it doesn't exist,
+        * queue an update to cfg80211 to retrieve the default alpha2 from FW.
+        */
+       retval = iwl_mvm_init_fw_regd(mvm);
+       if (retval != -ENOENT)
+               return retval;
+
+       /*
+        * Driver regulatory hint for initial update, this also informs the
+        * firmware we support wifi location updates.
+        * Disallow scans that might crash the FW while the LAR regdomain
+        * is not set.
+        */
+       mvm->lar_regdom_set = false;
+
+       regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+       if (IS_ERR_OR_NULL(regd))
+               return -EIO;
+
+       if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
+           !iwl_mvm_get_bios_mcc(mvm, mcc)) {
+               kfree(regd);
+               regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
+                                            MCC_SOURCE_BIOS, NULL);
+               if (IS_ERR_OR_NULL(regd))
+                       return -EIO;
+       }
+
+       retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+       kfree(regd);
+       return retval;
+}
+
+int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                              struct iwl_rx_cmd_buffer *rxb,
+                              struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+       enum iwl_mcc_source src;
+       char mcc[3];
+       struct ieee80211_regdomain *regd;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+               return 0;
+
+       mcc[0] = notif->mcc >> 8;
+       mcc[1] = notif->mcc & 0xff;
+       mcc[2] = '\0';
+       src = notif->source_id;
+
+       IWL_DEBUG_LAR(mvm,
+                     "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+                     mcc, src);
+       regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+       if (IS_ERR_OR_NULL(regd))
+               return 0;
+
+       regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+       kfree(regd);
+
+       return 0;
+}
index fe40922a6b0d467c86da151e2c786a265e46a1e2..a08b03d58d4bf0f3ebd4773a7fdb6e07cc8406b8 100644 (file)
@@ -82,7 +82,6 @@
 #include "rs.h"
 #include "fw-api-scan.h"
 #include "time-event.h"
-#include "iwl-fw-error-dump.h"
 
 #define DRV_DESCRIPTION        "The new Intel(R) wireless AGN driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -234,6 +233,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
                   iwl_mvm_rx_ant_coupling_notif, true),
 
        RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+       RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
 
        RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
 
@@ -358,6 +358,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(TDLS_CHANNEL_SWITCH_CMD),
        CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        CMD(TDLS_CONFIG_CMD),
+       CMD(MCC_UPDATE_CMD),
 };
 #undef CMD
 
@@ -487,8 +488,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        /* Set a short watchdog for the command queue */
        trans_cfg.cmd_q_wdg_timeout =
-               iwlmvm_mod_params.tfd_q_hang_detect ? IWL_DEF_WD_TIMEOUT :
-                                                     IWL_WATCHDOG_DISABLED;
+               iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
 
        snprintf(mvm->hw->wiphy->fw_version,
                 sizeof(mvm->hw->wiphy->fw_version),
@@ -523,12 +523,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        /* set the nvm_file_name according to priority */
        if (iwlwifi_mod_params.nvm_file) {
                mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
-       } else {
-               if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
-                   (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP))
-                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_8000A;
+       } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+               if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
+                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
                else
-                       mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
        }
 
        if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
@@ -690,7 +689,6 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
 {
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_cmd *cmds_trig;
-       char buf[32];
        int i;
 
        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
@@ -710,9 +708,9 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
                if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
                        continue;
 
-               memset(buf, 0, sizeof(buf));
-               snprintf(buf, sizeof(buf), "CMD 0x%02x received", pkt->hdr.cmd);
-               iwl_mvm_fw_dbg_collect_trig(mvm, trig, buf, sizeof(buf));
+               iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                           "CMD 0x%02x received",
+                                           pkt->hdr.cmd);
                break;
        }
 }
@@ -871,8 +869,8 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
        /* start recording again if the firmware is not crashed */
        WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-                     mvm->fw->dbg_dest_tlv &&
-                     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+                    mvm->fw->dbg_dest_tlv &&
+                    iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
 
        mutex_unlock(&mvm->mutex);
 
@@ -893,18 +891,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * the next start() call from mac80211. If restart isn't called
         * (no fw restart) scan status will stay busy.
         */
-       switch (mvm->scan_status) {
-       case IWL_MVM_SCAN_NONE:
-               break;
-       case IWL_MVM_SCAN_OS:
-               ieee80211_scan_completed(mvm->hw, true);
-               break;
-       case IWL_MVM_SCAN_SCHED:
-               /* Sched scan will be restarted by mac80211 in restart_hw. */
-               if (!mvm->restart_fw)
-                       ieee80211_sched_scan_stopped(mvm->hw);
-               break;
-       }
+       iwl_mvm_report_scan_aborted(mvm);
 
        /*
         * If we're restarting already, don't cycle restarts.
@@ -1174,7 +1161,7 @@ static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
 
        if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
            mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-               ieee80211_connection_loss(vif);
+               iwl_mvm_connection_loss(mvm, vif, "D0i3");
 }
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1270,6 +1257,10 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+
+       /* the FW might have updated the regdomain */
+       iwl_mvm_update_changed_regdom(mvm);
+
        iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
        mutex_unlock(&mvm->mutex);
 }
index 192b74bc8cf67270a7db805f846f16abf7a23081..e68a475e307194cd140a2708fc664e8a8f7f6f11 100644 (file)
@@ -67,7 +67,7 @@
 #include "fw-api.h"
 #include "mvm.h"
 
-/* Maps the driver specific channel width definition to the the fw values */
+/* Maps the driver specific channel width definition to the fw values */
 u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
 {
        switch (chandef->width) {
index 33bbdde0046fa29575f7fd72c9e2d1a1af144bd1..d2c6ba9d326b4656b8f6e7007554fb3a5ba8e681 100644 (file)
@@ -358,7 +358,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
        if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-           !mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif))
+           !mvmvif->pm_enabled)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -639,6 +639,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
        if (vifs->ap_vif)
                ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
 
+       /* don't allow PM if any TDLS stations exist */
+       if (iwl_mvm_tdls_sta_count(mvm, NULL))
+               return;
+
        /* enable PM on bss if bss stand alone */
        if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
                bss_mvmvif->pm_enabled = true;
index dbb2594390e96195c194f9c9ebe8d0967b3924b7..509a66d05245bb37ab9deb7391530297181be0c7 100644 (file)
@@ -172,6 +172,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+                         bool force_update,
                          struct ieee80211_vif *disabled_vif)
 {
        struct iwl_time_quota_cmd cmd = {};
@@ -309,7 +310,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
                          "zero quota on binding %d\n", i);
        }
 
-       if (!send) {
+       if (!send && !force_update) {
                /* don't send a practically unchanged command, the firmware has
                 * to re-initialize a lot of state and that can have an adverse
                 * impact on it
index 6578498dd5afde7ee13915f6e954cbfb84a3239d..f9928f2c125f726bbf89474096bd47990bfb86eb 100644 (file)
@@ -1065,6 +1065,37 @@ static inline bool rs_rate_column_match(struct rs_rate *a,
                && ant_match;
 }
 
+static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
+{
+       if (is_legacy(rate)) {
+               if (rate->ant == ANT_A)
+                       return RS_COLUMN_LEGACY_ANT_A;
+
+               if (rate->ant == ANT_B)
+                       return RS_COLUMN_LEGACY_ANT_B;
+
+               goto err;
+       }
+
+       if (is_siso(rate)) {
+               if (rate->ant == ANT_A || rate->stbc || rate->bfer)
+                       return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
+                               RS_COLUMN_SISO_ANT_A;
+
+               if (rate->ant == ANT_B)
+                       return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
+                               RS_COLUMN_SISO_ANT_B;
+
+               goto err;
+       }
+
+       if (is_mimo(rate))
+               return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
+
+err:
+       return RS_COLUMN_INVALID;
+}
+
 static u8 rs_get_tid(struct ieee80211_hdr *hdr)
 {
        u8 tid = IWL_MAX_TID_COUNT;
@@ -1106,17 +1137,43 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                return;
        }
 
+       /* This packet was aggregated but doesn't carry status info */
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+           !(info->flags & IEEE80211_TX_STAT_AMPDU))
+               return;
+
+       rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+
 #ifdef CONFIG_MAC80211_DEBUGFS
-       /* Disable last tx check if we are debugging with fixed rate */
+       /* Disable last tx check if we are debugging with fixed rate but
+        * update tx stats */
        if (lq_sta->pers.dbg_fixed_rate) {
-               IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
+               int index = tx_resp_rate.index;
+               enum rs_column column;
+               int attempts, success;
+
+               column = rs_get_column_from_rate(&tx_resp_rate);
+               if (WARN_ONCE(column == RS_COLUMN_INVALID,
+                             "Can't map rate 0x%x to column",
+                             tx_resp_hwrate))
+                       return;
+
+               if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+                       attempts = info->status.ampdu_len;
+                       success = info->status.ampdu_ack_len;
+               } else {
+                       attempts = info->status.rates[0].count;
+                       success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+               }
+
+               lq_sta->pers.tx_stats[column][index].total += attempts;
+               lq_sta->pers.tx_stats[column][index].success += success;
+
+               IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+                              tx_resp_hwrate, success, attempts);
                return;
        }
 #endif
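
As a worked example of the accounting above (assuming the debugfs fixed-rate path is
active): an A-MPDU reported with ampdu_len = 20 and ampdu_ack_len = 17 adds 20 to
tx_stats[column][index].total and 17 to .success, while a non-aggregated frame adds
rates[0].count attempts and one success only if IEEE80211_TX_STAT_ACK is set.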
-       /* This packet was aggregated but doesn't carry status info */
-       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
-           !(info->flags & IEEE80211_TX_STAT_AMPDU))
-               return;
 
        if (time_after(jiffies,
                       (unsigned long)(lq_sta->last_tx +
@@ -1142,7 +1199,6 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        table = &lq_sta->lq;
        lq_hwrate = le32_to_cpu(table->rs_table[0]);
        rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
-       rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
 
        /* Here we actually compare this rate to the latest LQ command */
        if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
@@ -1221,9 +1277,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                        info->status.ampdu_ack_len);
                }
        } else {
-       /*
-        * For legacy, update frame history with for each Tx retry.
-        */
+               /* For legacy, update frame history for each Tx retry. */
                retries = info->status.rates[0].count - 1;
                /* HW doesn't send more than 15 retries */
                retries = min(retries, 15);
@@ -1280,6 +1334,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
+       if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+               return;
+
        if (!ieee80211_is_data(hdr->frame_control) ||
            info->flags & IEEE80211_TX_CTL_NO_ACK)
                return;
@@ -1556,9 +1613,9 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 static void rs_update_rate_tbl(struct iwl_mvm *mvm,
                               struct ieee80211_sta *sta,
                               struct iwl_lq_sta *lq_sta,
-                              struct rs_rate *rate)
+                              struct iwl_scale_tbl_info *tbl)
 {
-       rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+       rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 }
 
@@ -2088,7 +2145,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                        rate->type = LQ_NONE;
                        lq_sta->search_better_tbl = 0;
                        tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-                       rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+                       rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
                }
                return;
        }
@@ -2251,7 +2308,7 @@ lq_update:
        /* Replace uCode's rate table for the destination station. */
        if (update_lq) {
                tbl->rate.index = index;
-               rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+               rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
        }
 
        rs_stay_in_table(lq_sta, false);
@@ -2298,8 +2355,7 @@ lq_update:
 
                        rs_dump_rate(mvm, &tbl->rate,
                                     "Switch to SEARCH TABLE:");
-                       rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
-                       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+                       rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
                } else {
                        done_search = 1;
                }
@@ -2513,6 +2569,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_lq_sta *lq_sta = mvm_sta;
 
+       if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+               /* if vif isn't initialized, mvm doesn't know about
+                * this station, so don't do anything with it
+                */
+               sta = NULL;
+               mvm_sta = NULL;
+       }
+
        /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
 
        /* Treat uninitialized rate scaling data same as non-existing. */
@@ -2830,6 +2894,9 @@ static void rs_rate_update(void *mvm_r,
                        (struct iwl_op_mode *)mvm_r;
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
+       if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+               return;
+
        /* Stop any ongoing aggregations as rs starts off assuming no agg */
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
                ieee80211_stop_tx_ba_session(sta, tid);
@@ -3168,7 +3235,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
        lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
        /*
-        * In case of low latency, tell the firwmare to leave a frame in the
+        * In case of low latency, tell the firmware to leave a frame in the
         * Tx Fifo so that it can start a transaction in the same TxOP. This
         * basically allows the firmware to send bursts.
         */
@@ -3343,16 +3410,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        (is_legacy(rate)) ? "legacy" :
                        is_vht(rate) ? "VHT" : "HT");
        if (!is_legacy(rate)) {
-               desc += sprintf(buff+desc, " %s",
+               desc += sprintf(buff + desc, " %s",
                   (is_siso(rate)) ? "SISO" : "MIMO2");
-                  desc += sprintf(buff+desc, " %s",
-                                  (is_ht20(rate)) ? "20MHz" :
-                                  (is_ht40(rate)) ? "40MHz" :
-                                  (is_ht80(rate)) ? "80Mhz" : "BAD BW");
-                  desc += sprintf(buff+desc, " %s %s %s\n",
-                                  (rate->sgi) ? "SGI" : "NGI",
-                                  (rate->ldpc) ? "LDPC" : "BCC",
-                                  (lq_sta->is_agg) ? "AGG on" : "");
+               desc += sprintf(buff + desc, " %s",
+                               (is_ht20(rate)) ? "20MHz" :
+                               (is_ht40(rate)) ? "40MHz" :
+                               (is_ht80(rate)) ? "80MHz" : "BAD BW");
+               desc += sprintf(buff + desc, " %s %s %s\n",
+                               (rate->sgi) ? "SGI" : "NGI",
+                               (rate->ldpc) ? "LDPC" : "BCC",
+                               (lq_sta->is_agg) ? "AGG on" : "");
        }
        desc += sprintf(buff+desc, "last tx rate=0x%X\n",
                        lq_sta->last_rate_n_flags);
@@ -3373,13 +3440,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        ss_params = le32_to_cpu(lq_sta->lq.ss_params);
        desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
                        (ss_params & LQ_SS_PARAMS_VALID) ?
-                       "VALID," : "INVALID",
+                       "VALID" : "INVALID",
                        (ss_params & LQ_SS_BFER_ALLOWED) ?
-                       "BFER," : "",
+                       ", BFER" : "",
                        (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
-                       "STBC," : "",
+                       ", STBC" : "",
                        (ss_params & LQ_SS_FORCE) ?
-                       "FORCE" : "");
+                       "FORCE" : "");
        desc += sprintf(buff+desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
@@ -3603,9 +3670,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
 
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
 
-static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
 {
-       struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       struct iwl_mvm_sta *mvmsta;
+
+       mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+
+       if (!mvmsta->vif)
+               return;
 
        debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
                            lq_sta, &rs_sta_dbgfs_scale_table_ops);
@@ -3669,7 +3742,7 @@ void iwl_mvm_rate_control_unregister(void)
 
 /**
  * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this rquest and previous requests,
+ * Tx protection, according to this request and previous requests,
  * and send the LQ command.
  * @mvmsta: The station
  * @enable: Enable Tx protection?
index 6177e24f4c016d09c8496186d65c394d05bb5eb3..78ec7db64ba59e886e2a7b18a3df64f70a4ea29c 100644 (file)
@@ -362,7 +362,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                                iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
                                                              trig);
                        if (trig_check && rx_status->signal < rssi)
-                               iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
+                               iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
                }
        }
 
@@ -552,7 +552,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
        if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
                return;
 
-       iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
 }
 
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
index a75bb150ea275ef18fd6449e8c1d5543fc466bf6..74e1c86289dcbcedc1f5c7b963e468095de7cf25 100644 (file)
@@ -935,6 +935,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 
        cmd->n_channels = (u8)req->n_channels;
 
+       cmd->delay = cpu_to_le32(req->delay);
+
        if (iwl_mvm_scan_pass_all(mvm, req))
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
        else
@@ -1177,6 +1179,18 @@ static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
        return false;
 }
 
+static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
+                                  enum iwl_umac_scan_uid_type type)
+{
+       int i;
+
+       for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
+               if (mvm->scan_uid[i] & type)
+                       return i;
+
+       return i;
+}
+
 static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
                                 enum iwl_umac_scan_uid_type type)
 {
@@ -1436,7 +1450,13 @@ int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                cpu_to_le16(req->interval / MSEC_PER_SEC);
        sec_part->schedule[0].iter_count = 0xff;
 
-       sec_part->delay = 0;
+       if (req->delay > U16_MAX) {
+               IWL_DEBUG_SCAN(mvm,
+                              "delay value is > 16-bits, set to max possible\n");
+               sec_part->delay = cpu_to_le16(U16_MAX);
+       } else {
+               sec_part->delay = cpu_to_le16(req->delay);
+       }
 
        iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
                req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
@@ -1613,3 +1633,54 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
                mvm->fw->ucode_capa.n_scan_channels +
                sizeof(struct iwl_scan_probe_req);
 }
+
+/*
+ * This function is used in the nic restart flow to inform mac80211 about scans
+ * that were aborted by the restart flow or by an assert.
+ */
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
+{
+       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+               u32 uid, i;
+
+               uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
+               if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS) {
+                       ieee80211_scan_completed(mvm->hw, true);
+                       mvm->scan_uid[uid] = 0;
+               }
+               uid = iwl_mvm_find_first_scan(mvm,
+                                             IWL_UMAC_SCAN_UID_SCHED_SCAN);
+               if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS && !mvm->restart_fw) {
+                       ieee80211_sched_scan_stopped(mvm->hw);
+                       mvm->scan_uid[uid] = 0;
+               }
+
+               /* We shouldn't have any UIDs still set.  Loop over all the
+                * UIDs to make sure there's nothing left there and warn if
+                * any is found.
+                */
+               for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
+                       if (WARN_ONCE(mvm->scan_uid[i],
+                                     "UMAC scan UID %d was not cleaned\n",
+                                     mvm->scan_uid[i]))
+                               mvm->scan_uid[i] = 0;
+               }
+       } else {
+               switch (mvm->scan_status) {
+               case IWL_MVM_SCAN_NONE:
+                       break;
+               case IWL_MVM_SCAN_OS:
+                       ieee80211_scan_completed(mvm->hw, true);
+                       break;
+               case IWL_MVM_SCAN_SCHED:
+                       /*
+                        * Sched scan will be restarted by mac80211 in
+                        * restart_hw, so do not report if FW is about to be
+                        * restarted.
+                        */
+                       if (!mvm->restart_fw)
+                               ieee80211_sched_scan_stopped(mvm->hw);
+                       break;
+               }
+       }
+}
index 7eb78e2c240a6e7778810869b9c31dc9b1ee2850..b0f59fdd287c787cbb0f65ca319aebfadb850d31 100644 (file)
@@ -99,7 +99,35 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
 
 /*
  * Aging and idle timeouts for the different possible scenarios
- * in SF_FULL_ON state.
+ * in default configuration
+ */
+static const
+__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+       {
+               cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+               cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
+               cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
+               cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_BA_AGING_TIMER_DEF),
+               cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
+               cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
+       },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
  */
 static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
        {
@@ -124,7 +152,8 @@ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
        },
 };
 
-static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
+static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
+                                   struct iwl_sf_cfg_cmd *sf_cmd,
                                    struct ieee80211_sta *sta)
 {
        int i, j, watermark;
@@ -163,24 +192,38 @@ static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
                                        cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
                }
        }
-       BUILD_BUG_ON(sizeof(sf_full_timeout) !=
-                    sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
 
-       memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
-              sizeof(sf_full_timeout));
+       if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+               BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+                            sizeof(__le32) * SF_NUM_SCENARIO *
+                            SF_NUM_TIMEOUT_TYPES);
+
+               memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+                      sizeof(sf_full_timeout));
+       } else {
+               BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+                            sizeof(__le32) * SF_NUM_SCENARIO *
+                            SF_NUM_TIMEOUT_TYPES);
+
+               memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
+                      sizeof(sf_full_timeout_def));
+       }
+
 }
 
 static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                             enum iwl_sf_state new_state)
 {
        struct iwl_sf_cfg_cmd sf_cmd = {
-               .state = cpu_to_le32(new_state),
+               .state = cpu_to_le32(SF_FULL_ON),
        };
        struct ieee80211_sta *sta;
        int ret = 0;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF &&
-           mvm->cfg->disable_dummy_notification)
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
+               sf_cmd.state = cpu_to_le32(new_state);
+
+       if (mvm->cfg->disable_dummy_notification)
                sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
 
        /*
@@ -192,6 +235,8 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 
        switch (new_state) {
        case SF_UNINIT:
+               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
+                       iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        case SF_FULL_ON:
                if (sta_id == IWL_MVM_STATION_COUNT) {
@@ -206,11 +251,11 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                        rcu_read_unlock();
                        return -EINVAL;
                }
-               iwl_mvm_fill_sf_command(&sf_cmd, sta);
+               iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
                rcu_read_unlock();
                break;
        case SF_INIT_OFF:
-               iwl_mvm_fill_sf_command(&sf_cmd, NULL);
+               iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        default:
                WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
index 5c23cddaaae34ea26a4722f9dbdc192e3e218ab5..1845b79487c81b446e0432bc385e9fc12884ff68 100644 (file)
@@ -209,9 +209,8 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
 {
        unsigned long used_hw_queues;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
-                                       mvm->cfg->base_params->wd_timeout :
-                                       IWL_WATCHDOG_DISABLED;
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
        u32 ac;
 
        lockdep_assert_held(&mvm->mutex);
@@ -273,7 +272,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
        else
                sta_id = mvm_sta->sta_id;
 
-       if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+       if (sta_id == IWL_MVM_STATION_COUNT)
                return -ENOSPC;
 
        spin_lock_init(&mvm_sta->lock);
@@ -491,8 +490,18 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 
        if (vif->type == NL80211_IFTYPE_STATION &&
            mvmvif->ap_sta_id == mvm_sta->sta_id) {
+               ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+               if (ret)
+                       return ret;
                /* flush its queues here since we are freeing mvm_sta */
                ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+               if (ret)
+                       return ret;
+               ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
+                                                   mvm_sta->tfd_queue_msk);
+               if (ret)
+                       return ret;
+               ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
                /* if we are associated - we can't remove the AP STA now */
                if (vif->bss_conf.assoc)
@@ -971,9 +980,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
-       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
-                                       mvm->cfg->base_params->wd_timeout :
-                                       IWL_WATCHDOG_DISABLED;
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
        int queue, fifo, ret;
        u16 ssn;
 
@@ -1120,8 +1128,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        spin_unlock_bh(&mvmsta->lock);
 
        if (old_state >= IWL_AGG_ON) {
+               iwl_mvm_drain_sta(mvm, mvmsta, true);
                if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
                        IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+               iwl_trans_wait_tx_queue_empty(mvm->trans,
+                                             mvmsta->tfd_queue_msk);
+               iwl_mvm_drain_sta(mvm, mvmsta, false);
 
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
@@ -1681,9 +1693,6 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
        };
        int ret;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
-               return;
-
        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
@@ -1705,8 +1714,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
        mvm_sta->disable_tx = disable;
 
        /*
-        * Tell mac80211 to start/stop queueing tx for this station,
-        * but don't stop queueing if there are still pending frames
+        * Tell mac80211 to start/stop queuing tx for this station,
+        * but don't stop queuing if there are still pending frames
         * for this station.
         */
        if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
index d8f48975ad087db1fad57b9f764fcb137416e2d0..748f5dc3f9f4337952efc84fe93e5bfb89f97c81 100644 (file)
@@ -150,7 +150,7 @@ struct iwl_mvm_vif;
  * DOC: station table - AP Station in STA mode
  *
  * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
- * %ap_sta_id. To get the point to the coresponsding %ieee80211_sta,
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
  * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
  * the AP station from the fw before setting the MAC context as unassociated.
  * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
@@ -209,14 +209,14 @@ struct iwl_mvm_vif;
  * When a trigger frame is received, mac80211 tells the driver to send frames
  * from the AMPDU queues or sends frames to non-aggregation queues itself,
  * depending on which ACs are delivery-enabled and what TID has frames to
- * transmit. Note that mac80211 has all the knowledege since all the non-agg
+ * transmit. Note that mac80211 has all the knowledge since all the non-agg
  * frames are buffered / filtered, and the driver tells mac80211 about agg
  * frames). The driver needs to tell the fw to let frames out even if the
  * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
  *
  * When we receive a frame from that station with PM bit unset, the driver
  * needs to let the fw know that this station isn't asleep any more. This is
- * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
  * station's wakeup.
  *
  * For a GO, the Service Period might be cut short due to an absence period
index f8d6f306dd76d276b82056c9fb1a74956e78ac3c..fd7b0d36f9a620b8e99dcc7b643806b76f09af4d 100644 (file)
@@ -119,7 +119,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 
        /*
         * Flush the offchannel queue -- this is called when the time
-        * event finishes or is cancelled, so that frames queued for it
+        * event finishes or is canceled, so that frames queued for it
         * won't get stuck on the queue and be transmitted in the next
         * time event.
         * We have to send the command asynchronously since this cannot
@@ -187,7 +187,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
                return false;
        if (errmsg)
                IWL_ERR(mvm, "%s\n", errmsg);
-       ieee80211_connection_loss(vif);
+
+       iwl_mvm_connection_loss(mvm, vif, errmsg);
        return true;
 }
 
@@ -196,17 +197,24 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
                             struct iwl_mvm_time_event_data *te_data,
                             struct iwl_time_event_notif *notif)
 {
-       if (!le32_to_cpu(notif->status)) {
+       struct ieee80211_vif *vif = te_data->vif;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (!notif->status)
                IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
-               iwl_mvm_te_clear_data(mvm, te_data);
-               return;
-       }
 
        switch (te_data->vif->type) {
        case NL80211_IFTYPE_AP:
+               if (!notif->status)
+                       mvmvif->csa_failed = true;
                iwl_mvm_csa_noa_start(mvm);
                break;
        case NL80211_IFTYPE_STATION:
+               if (!notif->status) {
+                       iwl_mvm_connection_loss(mvm, vif,
+                                               "CSA TE failed to start");
+                       break;
+               }
                iwl_mvm_csa_client_absent(mvm, te_data->vif);
                ieee80211_chswitch_done(te_data->vif, true);
                break;
@@ -220,6 +228,44 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
        iwl_mvm_te_clear_data(mvm, te_data);
 }
 
+static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
+                                    struct iwl_time_event_notif *notif,
+                                    struct iwl_mvm_time_event_data *te_data)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_time_event *te_trig;
+       int i;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
+       te_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
+               u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
+               u32 trig_action_bitmap =
+                       le32_to_cpu(te_trig->time_events[i].action_bitmap);
+               u32 trig_status_bitmap =
+                       le32_to_cpu(te_trig->time_events[i].status_bitmap);
+
+               if (trig_te_id != te_data->id ||
+                   !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
+                   !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
+                       continue;
+
+               iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                           "Time event %d Action 0x%x received status: %d",
+                                           te_data->id,
+                                           le32_to_cpu(notif->action),
+                                           le32_to_cpu(notif->status));
+               break;
+       }
+}
+
 /*
  * Handles a FW notification for an event that is known to the driver.
  *
@@ -237,6 +283,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));
 
+       iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
        /*
         * The FW sends the start/end time event notifications even for events
         * that it fails to schedule. This is indicated in the status field of
@@ -246,11 +294,16 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
         * events in the system).
         */
        if (!le32_to_cpu(notif->status)) {
-               bool start = le32_to_cpu(notif->action) &
-                               TE_V2_NOTIF_HOST_EVENT_START;
-               IWL_WARN(mvm, "Time Event %s notification failure\n",
-                        start ? "start" : "end");
-               if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+               const char *msg;
+
+               if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
+                       msg = "Time Event start notification failure";
+               else
+                       msg = "Time Event end notification failure";
+
+               IWL_DEBUG_TE(mvm, "%s\n", msg);
+
+               if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
                        iwl_mvm_te_clear_data(mvm, te_data);
                        return;
                }
@@ -261,17 +314,23 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                             "TE ended - current time %lu, estimated end %lu\n",
                             jiffies, te_data->end_jiffies);
 
-               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               switch (te_data->vif->type) {
+               case NL80211_IFTYPE_P2P_DEVICE:
                        ieee80211_remain_on_channel_expired(mvm->hw);
                        iwl_mvm_roc_finished(mvm);
+                       break;
+               case NL80211_IFTYPE_STATION:
+                       /*
+                        * By now, we should have finished association
+                        * and know the dtim period.
+                        */
+                       iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                               "No association and the time event is over already...");
+                       break;
+               default:
+                       break;
                }
 
-               /*
-                * By now, we should have finished association
-                * and know the dtim period.
-                */
-               iwl_mvm_te_check_disconnect(mvm, te_data->vif,
-                       "No association and the time event is over already...");
                iwl_mvm_te_clear_data(mvm, te_data);
        } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
@@ -307,6 +366,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
        if (!aux_roc_te) /* Not a Aux ROC time event */
                return -EINVAL;
 
+       iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
        if (!le32_to_cpu(notif->status)) {
                IWL_DEBUG_TE(mvm,
                             "ERROR: Aux ROC Time Event %s notification failure\n",
@@ -761,7 +822,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
         * Iterate over the list of aux roc time events and find the time
         * event that is associated with a BSS interface.
         * This assumes that a BSS interface can have only a single time
-        * event at any given time and this time event coresponds to a ROC
+        * event at any given time and this time event corresponds to a ROC
         * request
         */
        list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
index 6f6b35db3ab8eb9b71c8bf178d7227254340a41a..de4fbc6d57f150130e095fbec5471e9149d951bf 100644 (file)
@@ -147,7 +147,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
  * @vif: the virtual interface for which the session is issued
  *
  * This function cancels the session protection which is an act of good
- * citizenship. If it is not needed any more it should be cancelled because
+ * citizenship. If it is not needed any more it should be canceled because
  * the other bindings wait for the medium during that time.
  * This function doesn't sleep.
  */
@@ -162,7 +162,7 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
                                struct iwl_device_cmd *cmd);
 
 /**
- * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionlity
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
  * @mvm: the mvm component
  * @vif: the virtual interface for which the roc is requested. It is assumed
  * that the vif type is NL80211_IFTYPE_P2P_DEVICE
index 7906b97c81b96d357270d5502d92e3bcb17c6f73..ef32e177f662b3ba03772e02b1c9b9e512bcd64f 100644 (file)
@@ -953,8 +953,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        tid_data = &mvmsta->tid_data[tid];
 
-       if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
-                     tid_data->txq_id, tid, scd_flow)) {
+       if (tid_data->txq_id != scd_flow) {
+               IWL_ERR(mvm,
+                       "invalid BA notification: Q %d, tid %d, flow %d\n",
+                       tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
                return 0;
        }
@@ -1047,6 +1049,14 @@ out:
        return 0;
 }
 
+/*
+ * Note that there are transports that buffer frames before they reach
+ * the firmware. This means that after flush_tx_path is called, the
+ * queue might not be empty. The race-free way to handle this is to:
+ * 1) set the station as draining
+ * 2) flush the Tx path
+ * 3) wait for the transport queues to be empty
+ */
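
A minimal sketch of that sequence, as this patch applies it in the station-removal and
aggregation-flush paths above (error handling omitted):

        iwl_mvm_drain_sta(mvm, mvmsta, true);                             /* 1) mark the station as draining */
        iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true);          /* 2) flush the Tx path */
        iwl_trans_wait_tx_queue_empty(mvm->trans, mvmsta->tfd_queue_msk); /* 3) wait for the queues to drain */
        iwl_mvm_drain_sta(mvm, mvmsta, false);                            /* clear the draining state again */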
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
 {
        int ret;
index 2b9de63951e609463a5aa65b289e7908b1f163a2..bc55a8b82db6d88ad42ef1151e0c5c53c2cbf64c 100644 (file)
@@ -122,7 +122,7 @@ int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
 }
 
 /*
- * We assume that the caller set the status to the sucess value
+ * We assume that the caller set the status to the success value
  */
 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
                            u32 *status)
@@ -737,7 +737,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
 }
 
 /**
- * iwl_mvm_update_smps - Get a requst to change the SMPS mode
+ * iwl_mvm_update_smps - Get a request to change the SMPS mode
  * @req_type: The part of the driver that calls for the change.
  * @smps_requests: The request to change the SMPS mode.
  *
@@ -857,7 +857,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        mvmvif->low_latency = value;
 
-       res = iwl_mvm_update_quotas(mvm, NULL);
+       res = iwl_mvm_update_quotas(mvm, false, NULL);
        if (res)
                return res;
 
@@ -921,3 +921,71 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
 
        return bss_iter_data.vif;
 }
+
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   bool tdls, bool cmd_q)
+{
+       struct iwl_fw_dbg_trigger_tlv *trigger;
+       struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
+       unsigned int default_timeout =
+               cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
+               return iwlmvm_mod_params.tfd_q_hang_detect ?
+                       default_timeout : IWL_WATCHDOG_DISABLED;
+
+       trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
+       txq_timer = (void *)trigger->data;
+
+       if (tdls)
+               return le32_to_cpu(txq_timer->tdls);
+
+       if (cmd_q)
+               return le32_to_cpu(txq_timer->command_queue);
+
+       if (WARN_ON(!vif))
+               return default_timeout;
+
+       switch (ieee80211_vif_type_p2p(vif)) {
+       case NL80211_IFTYPE_ADHOC:
+               return le32_to_cpu(txq_timer->ibss);
+       case NL80211_IFTYPE_STATION:
+               return le32_to_cpu(txq_timer->bss);
+       case NL80211_IFTYPE_AP:
+               return le32_to_cpu(txq_timer->softap);
+       case NL80211_IFTYPE_P2P_CLIENT:
+               return le32_to_cpu(txq_timer->p2p_client);
+       case NL80211_IFTYPE_P2P_GO:
+               return le32_to_cpu(txq_timer->p2p_go);
+       case NL80211_IFTYPE_P2P_DEVICE:
+               return le32_to_cpu(txq_timer->p2p_device);
+       default:
+               WARN_ON(1);
+               return mvm->cfg->base_params->wd_timeout;
+       }
+}
+
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            const char *errmsg)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+               goto out;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+       trig_mlme = (void *)trig->data;
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               goto out;
+
+       if (trig_mlme->stop_connection_loss &&
+           --trig_mlme->stop_connection_loss)
+               goto out;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);
+
+out:
+       ieee80211_connection_loss(vif);
+}
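
As a hedged sketch of how the new iwl_mvm_get_wd_timeout() helper can be consumed: resolve the per-queue watchdog value from the vif type (or the command-queue default) and hand it on. The wrapper function and its out-parameters are hypothetical; only the iwl_mvm_get_wd_timeout() calls come from this patch.

/* Hypothetical caller sketch - not part of this patch */
static void example_pick_wd_timeouts(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif,
                                     unsigned int *data_timeout,
                                     unsigned int *cmd_timeout)
{
        /* data queue on a regular (non-TDLS) interface */
        *data_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);

        /* the command queue value is resolved without any vif */
        *cmd_timeout = iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
}
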
index dbd6bcf5220563c03727bccde83083032b977ff8..b1856973492237dcbee2ebf640ebeb0bf593fecb 100644 (file)
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -413,10 +415,35 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 8000 Series */
        {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
        {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
index cae0eb8835ceae9d2c11190041417482a5b6f436..01996c9d98a79b1d62e3a665cd0c720df79ad04e 100644 (file)
@@ -217,6 +217,8 @@ struct iwl_pcie_txq_scratch_buf {
  * @active: stores if queue is active
  * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
  * @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remaining time until the stuck timer fires
  *
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
@@ -228,9 +230,11 @@ struct iwl_txq {
        dma_addr_t scratchbufs_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
+       unsigned long frozen_expiry_remainder;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
+       bool frozen;
        u8 active;
        bool ampdu;
        unsigned long wd_timeout;
index 7b7e2f223fb230fad922e7fcaec08ec8ae7eb5ac..7ff69c642103f1febeea67d9c082a035039b4121 100644 (file)
@@ -600,9 +600,11 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
                        break;
 
-               IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
-                       rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
-                       pkt->hdr.cmd);
+               IWL_DEBUG_RX(trans,
+                            "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+                            rxcb._offset,
+                            get_cmd_string(trans_pcie, pkt->hdr.cmd),
+                            pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
 
                len = iwl_rx_packet_len(pkt);
                len += sizeof(u32); /* account for status word */
index f31a941607719053e2e18bcacb24f3d5b6bd06ce..2de8fbfe4edf4d6c6997307fb91052177fd7e6e4 100644 (file)
@@ -682,10 +682,51 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
        return ret;
 }
 
-static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
-                                           const struct fw_img *image,
-                                           int cpu,
-                                           int *first_ucode_section)
+/*
+ * The driver takes ownership of the secure machine before FW load
+ * and prevents a race with the BT load.
+ * W/A for a ROM bug (should be removed in the next Si step).
+ */
+static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+{
+       u32 val, loop = 1000;
+
+       /*
+        * Check that the RSA semaphore is accessible.
+        * If the HW isn't locked and the RSA semaphore isn't accessible,
+        * we are in trouble.
+        */
+       val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+       if (val & (BIT(1) | BIT(17))) {
+               IWL_INFO(trans,
+                        "can't access the RSA semaphore it is write protected\n");
+               return 0;
+       }
+
+       /* take ownership on the AUX IF */
+       iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
+       iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
+
+       do {
+               iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
+               val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
+               if (val == 0x1) {
+                       iwl_write_prph(trans, RSA_ENABLE, 0);
+                       return 0;
+               }
+
+               udelay(10);
+               loop--;
+       } while (loop > 0);
+
+       IWL_ERR(trans, "Failed to take ownership on secure machine\n");
+       return -EIO;
+}
+
+static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+                                          const struct fw_img *image,
+                                          int cpu,
+                                          int *first_ucode_section)
 {
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
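
The RSA race workaround above is a bounded poll: request the semaphore, read the status back, and retry a fixed number of times before reporting failure. A self-contained userspace model of that pattern, with toy register accessors standing in for the PRPH reads/writes (nothing here is iwlwifi API):

#include <stdio.h>

static unsigned int toy_smphr;   /* stands in for the semaphore status register */

static void toy_write(unsigned int val) { toy_smphr = val; }
static unsigned int toy_read(void) { return toy_smphr; }

/* request the semaphore and poll for it, bounded to 'loops' attempts */
static int toy_take_semaphore(int loops)
{
        while (loops-- > 0) {
                toy_write(0x1);
                if (toy_read() == 0x1)
                        return 0;               /* ownership acquired */
                /* a real driver would delay here (e.g. udelay(10)) */
        }
        return -1;                              /* timed out */
}

int main(void)
{
        printf("semaphore: %s\n",
               toy_take_semaphore(1000) ? "timeout" : "taken");
        return 0;
}
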
@@ -880,20 +921,16 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
        }
 
        /* release CPU reset */
-       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-               iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
-       else
-               iwl_write32(trans, CSR_RESET, 0);
+       iwl_write32(trans, CSR_RESET, 0);
 
        return 0;
 }
 
-static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
-                                          const struct fw_img *image)
+static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
+                                         const struct fw_img *image)
 {
        int ret = 0;
        int first_ucode_section;
-       u32 reg;
 
        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");
@@ -901,43 +938,33 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
        if (trans->dbg_dest_tlv)
                iwl_pcie_apply_destination(trans);
 
+       /* TODO: remove in the next Si step */
+       ret = iwl_pcie_rsa_race_bug_wa(trans);
+       if (ret)
+               return ret;
+
        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
 
        /* load to FW the binary Secured sections of CPU1 */
-       ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1,
-                                              &first_ucode_section);
+       ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
+                                             &first_ucode_section);
        if (ret)
                return ret;
 
        /* load to FW the binary sections of CPU2 */
-       ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2,
-                                              &first_ucode_section);
+       ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+                                             &first_ucode_section);
        if (ret)
                return ret;
 
-       /* wait for image verification to complete  */
-       ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
-                               LMPM_SECURE_BOOT_STATUS_SUCCESS,
-                               LMPM_SECURE_BOOT_STATUS_SUCCESS,
-                               LMPM_SECURE_TIME_OUT);
-       if (ret < 0) {
-               reg = iwl_read_prph(trans,
-                                   LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0);
-
-               IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n",
-                       reg);
-               return ret;
-       }
-
        return 0;
 }
 
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;
        bool hw_rfkill;
 
@@ -967,9 +994,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                return ret;
        }
 
-       /* init ref_count to 1 (should be cleared when ucode is loaded) */
-       trans_pcie->ref_count = 1;
-
        /* make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
@@ -984,9 +1008,8 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
 
        /* Load the given image to the HW */
-       if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
-           (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP))
-               return iwl_pcie_load_given_ucode_8000b(trans, fw);
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               return iwl_pcie_load_given_ucode_8000(trans, fw);
        else
                return iwl_pcie_load_given_ucode(trans, fw);
 }
@@ -1288,6 +1311,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
 
+       /* init ref_count to 1 (should be cleared when ucode is loaded) */
+       trans_pcie->ref_count = 1;
+
        /* Initialize NAPI here - it should be before registering to mac80211
         * in the opmode but after the HW struct is allocated.
         * As this function may be called again in some corner cases don't
@@ -1462,6 +1488,60 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
        return ret;
 }
 
+static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
+                                           unsigned long txqs,
+                                           bool freeze)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int queue;
+
+       for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+               struct iwl_txq *txq = &trans_pcie->txq[queue];
+               unsigned long now;
+
+               spin_lock_bh(&txq->lock);
+
+               now = jiffies;
+
+               if (txq->frozen == freeze)
+                       goto next_queue;
+
+               IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+                                   freeze ? "Freezing" : "Waking", queue);
+
+               txq->frozen = freeze;
+
+               if (txq->q.read_ptr == txq->q.write_ptr)
+                       goto next_queue;
+
+               if (freeze) {
+                       if (unlikely(time_after(now,
+                                               txq->stuck_timer.expires))) {
+                               /*
+                                * The timer should have fired, maybe it is
+                                * spinning right now on the lock.
+                                */
+                               goto next_queue;
+                       }
+                       /* remember how long until the timer fires */
+                       txq->frozen_expiry_remainder =
+                               txq->stuck_timer.expires - now;
+                       del_timer(&txq->stuck_timer);
+                       goto next_queue;
+               }
+
+               /*
+                * Wake a non-empty queue -> arm timer with the
+                * remainder before it froze
+                */
+               mod_timer(&txq->stuck_timer,
+                         now + txq->frozen_expiry_remainder);
+
+next_queue:
+               spin_unlock_bh(&txq->lock);
+       }
+}
+
 #define IWL_FLUSH_WAIT_MS      2000
 
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
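
The freeze/wake handling above reduces to simple deadline arithmetic: when freezing a non-empty queue, record how long the stuck timer still had to run and delete it; when waking, re-arm the timer with exactly that remainder. A standalone toy model of the arithmetic (plain C, no kernel timer API; all names here are invented):

#include <stdbool.h>
#include <stdio.h>

struct toy_txq {
        bool frozen;
        unsigned long expires;                  /* absolute deadline, in ticks */
        unsigned long frozen_expiry_remainder;  /* saved while frozen */
};

static void toy_freeze(struct toy_txq *q, unsigned long now, bool freeze)
{
        if (q->frozen == freeze)
                return;
        q->frozen = freeze;

        if (freeze) {
                if (now >= q->expires)          /* timer already due, nothing to save */
                        return;
                q->frozen_expiry_remainder = q->expires - now;
        } else {
                q->expires = now + q->frozen_expiry_remainder;
        }
}

int main(void)
{
        struct toy_txq q = { .expires = 100 };

        toy_freeze(&q, 40, true);               /* remainder = 60 */
        toy_freeze(&q, 75, false);              /* re-armed deadline = 135 */
        printf("re-armed deadline: %lu\n", q.expires);
        return 0;
}
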
@@ -1713,7 +1793,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
        int ret;
        size_t bufsz;
 
-       bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
+       bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
 
        if (!trans_pcie->txq)
                return -EAGAIN;
@@ -1726,11 +1806,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
-                               "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
+                               "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
                                cnt, q->read_ptr, q->write_ptr,
                                !!test_bit(cnt, trans_pcie->queue_used),
                                 !!test_bit(cnt, trans_pcie->queue_stopped),
-                                txq->need_update,
+                                txq->need_update, txq->frozen,
                                 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1961,24 +2041,25 @@ static const struct {
        { .start = 0x00a01c7c, .end = 0x00a01c7c },
        { .start = 0x00a01c28, .end = 0x00a01c54 },
        { .start = 0x00a01c5c, .end = 0x00a01c5c },
-       { .start = 0x00a01c84, .end = 0x00a01c84 },
+       { .start = 0x00a01c60, .end = 0x00a01cdc },
        { .start = 0x00a01ce0, .end = 0x00a01d0c },
        { .start = 0x00a01d18, .end = 0x00a01d20 },
        { .start = 0x00a01d2c, .end = 0x00a01d30 },
        { .start = 0x00a01d40, .end = 0x00a01d5c },
        { .start = 0x00a01d80, .end = 0x00a01d80 },
-       { .start = 0x00a01d98, .end = 0x00a01d98 },
+       { .start = 0x00a01d98, .end = 0x00a01d9c },
+       { .start = 0x00a01da8, .end = 0x00a01da8 },
+       { .start = 0x00a01db8, .end = 0x00a01df4 },
        { .start = 0x00a01dc0, .end = 0x00a01dfc },
        { .start = 0x00a01e00, .end = 0x00a01e2c },
        { .start = 0x00a01e40, .end = 0x00a01e60 },
+       { .start = 0x00a01e68, .end = 0x00a01e6c },
+       { .start = 0x00a01e74, .end = 0x00a01e74 },
        { .start = 0x00a01e84, .end = 0x00a01e90 },
        { .start = 0x00a01e9c, .end = 0x00a01ec4 },
-       { .start = 0x00a01ed0, .end = 0x00a01ed0 },
-       { .start = 0x00a01f00, .end = 0x00a01f14 },
-       { .start = 0x00a01f44, .end = 0x00a01f58 },
-       { .start = 0x00a01f80, .end = 0x00a01fa8 },
-       { .start = 0x00a01fb0, .end = 0x00a01fbc },
-       { .start = 0x00a01ff8, .end = 0x00a01ffc },
+       { .start = 0x00a01ed0, .end = 0x00a01ee0 },
+       { .start = 0x00a01f00, .end = 0x00a01f1c },
+       { .start = 0x00a01f44, .end = 0x00a01ffc },
        { .start = 0x00a02000, .end = 0x00a02048 },
        { .start = 0x00a02068, .end = 0x00a020f0 },
        { .start = 0x00a02100, .end = 0x00a02118 },
@@ -2305,6 +2386,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
        .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
+       .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
 
        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
@@ -2423,10 +2505,45 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
-       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+               unsigned long flags;
+               int ret;
+
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+               /*
+                * In order to recognize the C step, the driver should read the
+                * chip version id located in the AUX bus MISC address space.
+                */
+               iwl_set_bit(trans, CSR_GP_CNTRL,
+                           CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+               udelay(2);
+
+               ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                                  25000);
+               if (ret < 0) {
+                       IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+                       goto out_pci_disable_msi;
+               }
+
+               if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+                       u32 hw_step;
+
+                       hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
+                       hw_step |= ENABLE_WFPM;
+                       __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
+                       hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
+                       hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+                       if (hw_step == 0x3)
+                               trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
+                                               (SILICON_C_STEP << 2);
+                       iwl_trans_release_nic_access(trans, &flags);
+               }
+       }
+
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
index af0bce736358dc2a6d12ed5c922f6349ce95f7b2..06952aadfd7b5d4dccfff9f9689cd804031ab0eb 100644 (file)
@@ -725,33 +725,50 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
        iwl_pcie_tx_start(trans, 0);
 }
 
+static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       unsigned long flags;
+       int ch, ret;
+       u32 mask = 0;
+
+       spin_lock(&trans_pcie->irq_lock);
+
+       if (!iwl_trans_grab_nic_access(trans, false, &flags))
+               goto out;
+
+       /* Stop each Tx DMA channel */
+       for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+               iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+               mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
+       }
+
+       /* Wait for DMA channels to be idle */
+       ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
+       if (ret < 0)
+               IWL_ERR(trans,
+                       "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+                       ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
+
+       iwl_trans_release_nic_access(trans, &flags);
+
+out:
+       spin_unlock(&trans_pcie->irq_lock);
+}
+
 /*
  * iwl_pcie_tx_stop - Stop all Tx DMA channels
  */
 int iwl_pcie_tx_stop(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       int ch, txq_id, ret;
+       int txq_id;
 
        /* Turn off all Tx DMA fifos */
-       spin_lock(&trans_pcie->irq_lock);
-
        iwl_scd_deactivate_fifos(trans);
 
-       /* Stop each Tx DMA channel, and wait for it to be idle */
-       for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
-               iwl_write_direct32(trans,
-                                  FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-               ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
-                       FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
-               if (ret < 0)
-                       IWL_ERR(trans,
-                               "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
-                               ch,
-                               iwl_read_direct32(trans,
-                                                 FH_TSSR_TX_STATUS_REG));
-       }
-       spin_unlock(&trans_pcie->irq_lock);
+       /* Turn off all Tx DMA channels */
+       iwl_pcie_tx_stop_fh(trans);
 
        /*
         * This function can be called before the op_mode disabled the
@@ -912,9 +929,18 @@ error:
 
 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
 {
+       lockdep_assert_held(&txq->lock);
+
        if (!txq->wd_timeout)
                return;
 
+       /*
+        * station is asleep and we send data - that must
+        * be uAPSD or PS-Poll. Don't rearm the timer.
+        */
+       if (txq->frozen)
+               return;
+
        /*
         * if empty delete timer, otherwise move timer forward
         * since we're making progress on this queue
@@ -1248,6 +1274,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
                        SCD_TX_STTS_QUEUE_OFFSET(txq_id);
        static const u32 zero_val[4] = {};
 
+       trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
+       trans_pcie->txq[txq_id].frozen = false;
+
        /*
         * Upon HW Rfkill - we stop the device, and then stop the queues
         * in the op_mode. Just for the sake of the simplicity of the op_mode,
index d576dd6665d38d182053a6a7622ff1ddd0b41926..1a20cee5febea93aa350e1ff401e88e04aa2c4b7 100644 (file)
@@ -365,7 +365,6 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(if_usb_reset_device);
 
 /**
  *  usb_tx_block - transfer data to the device
@@ -907,7 +906,6 @@ restart:
        lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
        return ret;
 }
-EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
 
 
 #define if_usb_suspend NULL
index 543148d27b01cb659dfc65dfe5c3a977c9b15b9d..433bd6837c79042b3a5e7daa2bb20f594fd7dd28 100644 (file)
@@ -159,6 +159,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
        int tid;
        struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
        struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
+       struct mwifiex_ra_list_tbl *ra_list;
        u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
        add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -166,7 +167,13 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
 
        tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
               >> BLOCKACKPARAM_TID_POS;
+       ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp->
+               peer_mac_addr);
        if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
+               if (ra_list) {
+                       ra_list->ba_status = BA_SETUP_NONE;
+                       ra_list->amsdu_in_ampdu = false;
+               }
                mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
                                   TYPE_DELBA_SENT, true);
                if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
@@ -185,6 +192,10 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
                        tx_ba_tbl->amsdu = true;
                else
                        tx_ba_tbl->amsdu = false;
+               if (ra_list) {
+                       ra_list->amsdu_in_ampdu = tx_ba_tbl->amsdu;
+                       ra_list->ba_status = BA_SETUP_COMPLETE;
+               }
        } else {
                dev_err(priv->adapter->dev, "BA stream not created\n");
        }
@@ -515,6 +526,7 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
                           enum mwifiex_ba_status ba_status)
 {
        struct mwifiex_tx_ba_stream_tbl *new_node;
+       struct mwifiex_ra_list_tbl *ra_list;
        unsigned long flags;
 
        if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
@@ -522,7 +534,11 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
                                   GFP_ATOMIC);
                if (!new_node)
                        return;
-
+               ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra);
+               if (ra_list) {
+                       ra_list->ba_status = ba_status;
+                       ra_list->amsdu_in_ampdu = false;
+               }
                INIT_LIST_HEAD(&new_node->list);
 
                new_node->tid = tid;
index 8e2e39422ad80edaa303c7a5877651d3ceb92a9f..afdd58aa90deda02793601bbf534e6eb0ad9180d 100644 (file)
@@ -77,22 +77,6 @@ mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
        return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
 }
 
-/* This function checks whether AMSDU is allowed for BA stream. */
-static inline u8
-mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
-                                 struct mwifiex_ra_list_tbl *ptr, int tid)
-{
-       struct mwifiex_tx_ba_stream_tbl *tx_tbl;
-
-       if (is_broadcast_ether_addr(ptr->ra))
-               return false;
-       tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
-       if (tx_tbl)
-               return tx_tbl->amsdu;
-
-       return false;
-}
-
 /* This function checks whether AMPDU is allowed or not for a particular TID. */
 static inline u8
 mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
@@ -181,22 +165,6 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
        return ret;
 }
 
-/*
- * This function checks whether BA stream is set up or not.
- */
-static inline int
-mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
-                          struct mwifiex_ra_list_tbl *ptr, int tid)
-{
-       struct mwifiex_tx_ba_stream_tbl *tx_tbl;
-
-       tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
-       if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
-               return true;
-
-       return false;
-}
-
 /*
  * This function checks whether associated station is 11n enabled
  */
index 9b983b5cebbdf0dbda7ff51596df1e13de759a09..6183e255e62ac380a614cb593cb226d27da5909d 100644 (file)
@@ -170,7 +170,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
        struct sk_buff *skb_aggr, *skb_src;
        struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
-       int pad = 0, ret;
+       int pad = 0, aggr_num = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;
        struct timeval tv;
@@ -184,7 +184,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        }
 
        tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
-       skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
+       skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
+                                              GFP_ATOMIC | GFP_DMA);
        if (!skb_aggr) {
                dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
@@ -200,6 +201,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
        if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
                tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+       tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
        skb_aggr->priority = skb_src->priority;
 
        do_gettimeofday(&tv);
@@ -211,11 +213,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                        break;
 
                skb_src = skb_dequeue(&pra_list->skb_head);
-
                pra_list->total_pkt_count--;
-
                atomic_dec(&priv->wmm.tx_pkts_queued);
-
+               aggr_num++;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
@@ -251,6 +251,12 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                ptx_pd = (struct txpd *)skb_aggr->data;
 
        skb_push(skb_aggr, headroom);
+       tx_info_aggr->aggr_num = aggr_num * 2;
+       if (adapter->data_sent || adapter->tx_lock_flag) {
+               atomic_add(aggr_num * 2, &adapter->tx_queued);
+               skb_queue_tail(&adapter->tx_data_q, skb_aggr);
+               return 0;
+       }
 
        if (adapter->iface_type == MWIFIEX_USB) {
                adapter->data_sent = true;
index a2e8817b56d8d317a59e40f7901f2052384ecef4..f75f8acfaca0332cef494e8146d4e331db683385 100644 (file)
@@ -659,6 +659,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 {
        struct mwifiex_rx_reorder_tbl *tbl;
        struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
+       struct mwifiex_ra_list_tbl *ra_list;
        u8 cleanup_rx_reorder_tbl;
        unsigned long flags;
 
@@ -686,7 +687,11 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
                                "event: TID, RA not found in table\n");
                        return;
                }
-
+               ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
+               if (ra_list) {
+                       ra_list->amsdu_in_ampdu = false;
+                       ra_list->ba_status = BA_SETUP_NONE;
+               }
                spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
                mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
                spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
index 6f8993c12373587e3839e5d35a458a68ce882968..bf9020ff2d33cf1cf9dc2c044f06a403b655e593 100644 (file)
@@ -717,6 +717,9 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
 
 static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
+       unsigned long flags;
+
        priv->mgmt_frame_mask = 0;
        if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
                             HostCmd_ACT_GEN_SET, 0,
@@ -727,6 +730,25 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
        }
 
        mwifiex_deauthenticate(priv, NULL);
+
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       adapter->main_locked = true;
+       if (adapter->mwifiex_processing) {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+               flush_workqueue(adapter->workqueue);
+       } else {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+       }
+
+       spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+       adapter->rx_locked = true;
+       if (adapter->rx_processing) {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+               flush_workqueue(adapter->rx_workqueue);
+       } else {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+       }
+
        mwifiex_free_priv(priv);
        priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -740,6 +762,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                             struct net_device *dev,
                             enum nl80211_iftype type)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
+       unsigned long flags;
+
        mwifiex_init_priv(priv);
 
        priv->bss_mode = type;
@@ -770,6 +795,14 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                return -EOPNOTSUPP;
        }
 
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       adapter->main_locked = false;
+       spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+
+       spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+       adapter->rx_locked = false;
+       spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+
        return 0;
 }
 
@@ -2733,24 +2766,71 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
 }
 
 #ifdef CONFIG_PM
-static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
-                                 struct cfg80211_wowlan *wowlan)
+static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv,
+                                          struct mwifiex_mef_entry *mef_entry)
+{
+       int i, filt_num = 0, num_ipv4 = 0;
+       struct in_device *in_dev;
+       struct in_ifaddr *ifa;
+       __be32 ips[MWIFIEX_MAX_SUPPORTED_IPADDR];
+       struct mwifiex_adapter *adapter = priv->adapter;
+
+       mef_entry->mode = MEF_MODE_HOST_SLEEP;
+       mef_entry->action = MEF_ACTION_AUTO_ARP;
+
+       /* Enable ARP offload feature */
+       memset(ips, 0, sizeof(ips));
+       for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
+               if (adapter->priv[i]->netdev) {
+                       in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev);
+                       if (!in_dev)
+                               continue;
+                       ifa = in_dev->ifa_list;
+                       if (!ifa || !ifa->ifa_local)
+                               continue;
+                       ips[i] = ifa->ifa_local;
+                       num_ipv4++;
+               }
+       }
+
+       for (i = 0; i < num_ipv4; i++) {
+               if (!ips[i])
+                       continue;
+               mef_entry->filter[filt_num].repeat = 1;
+               memcpy(mef_entry->filter[filt_num].byte_seq,
+                      (u8 *)&ips[i], sizeof(ips[i]));
+               mef_entry->filter[filt_num].
+                       byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+                       sizeof(ips[i]);
+               mef_entry->filter[filt_num].offset = 46;
+               mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+               if (filt_num) {
+                       mef_entry->filter[filt_num].filt_action =
+                               TYPE_OR;
+               }
+               filt_num++;
+       }
+
+       mef_entry->filter[filt_num].repeat = 1;
+       mef_entry->filter[filt_num].byte_seq[0] = 0x08;
+       mef_entry->filter[filt_num].byte_seq[1] = 0x06;
+       mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2;
+       mef_entry->filter[filt_num].offset = 20;
+       mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+       mef_entry->filter[filt_num].filt_action = TYPE_AND;
+}
+
+static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
+                                       struct mwifiex_ds_mef_cfg *mef_cfg,
+                                       struct mwifiex_mef_entry *mef_entry,
+                                       struct cfg80211_wowlan *wowlan)
 {
        int i, filt_num = 0, ret = 0;
        bool first_pat = true;
        u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
        const u8 ipv4_mc_mac[] = {0x33, 0x33};
        const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
-       struct mwifiex_ds_mef_cfg mef_cfg;
-       struct mwifiex_mef_entry *mef_entry;
 
-       mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
-       if (!mef_entry)
-               return -ENOMEM;
-
-       memset(&mef_cfg, 0, sizeof(mef_cfg));
-       mef_cfg.num_entries = 1;
-       mef_cfg.mef_entry = mef_entry;
        mef_entry->mode = MEF_MODE_HOST_SLEEP;
        mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
 
@@ -2767,20 +2847,19 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
                if (!wowlan->patterns[i].pkt_offset) {
                        if (!(byte_seq[0] & 0x01) &&
                            (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
-                               mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+                               mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
                                continue;
                        } else if (is_broadcast_ether_addr(byte_seq)) {
-                               mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
+                               mef_cfg->criteria |= MWIFIEX_CRITERIA_BROADCAST;
                                continue;
                        } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
                                    (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
                                   (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
                                    (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
-                               mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
+                               mef_cfg->criteria |= MWIFIEX_CRITERIA_MULTICAST;
                                continue;
                        }
                }
-
                mef_entry->filter[filt_num].repeat = 1;
                mef_entry->filter[filt_num].offset =
                        wowlan->patterns[i].pkt_offset;
@@ -2797,7 +2876,7 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
        }
 
        if (wowlan->magic_pkt) {
-               mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+               mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
                mef_entry->filter[filt_num].repeat = 16;
                memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
                                ETH_ALEN);
@@ -2818,6 +2897,34 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
                mef_entry->filter[filt_num].filt_action = TYPE_OR;
        }
+       return ret;
+}
+
+static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
+                                 struct cfg80211_wowlan *wowlan)
+{
+       int ret = 0, num_entries = 1;
+       struct mwifiex_ds_mef_cfg mef_cfg;
+       struct mwifiex_mef_entry *mef_entry;
+
+       if (wowlan->n_patterns || wowlan->magic_pkt)
+               num_entries++;
+
+       mef_entry = kcalloc(num_entries, sizeof(*mef_entry), GFP_KERNEL);
+       if (!mef_entry)
+               return -ENOMEM;
+
+       memset(&mef_cfg, 0, sizeof(mef_cfg));
+       mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST |
+               MWIFIEX_CRITERIA_UNICAST;
+       mef_cfg.num_entries = num_entries;
+       mef_cfg.mef_entry = mef_entry;
+
+       mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
+
+       if (wowlan->n_patterns || wowlan->magic_pkt)
+               ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
+                                                  &mef_entry[1], wowlan);
 
        if (!mef_cfg.criteria)
                mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -2825,8 +2932,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
                        MWIFIEX_CRITERIA_MULTICAST;
 
        ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
-                       HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
-
+                       HostCmd_ACT_GEN_SET, 0,
+                       &mef_cfg, true);
        kfree(mef_entry);
        return ret;
 }
@@ -2836,27 +2943,33 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 {
        struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
        struct mwifiex_ds_hs_cfg hs_cfg;
-       int ret = 0;
-       struct mwifiex_private *priv =
-                       mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+       int i, ret = 0;
+       struct mwifiex_private *priv;
+
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               mwifiex_abort_cac(priv);
+       }
+
+       mwifiex_cancel_all_pending_cmd(adapter);
 
        if (!wowlan) {
                dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
                return 0;
        }
 
+       priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
        if (!priv->media_connected) {
                dev_warn(adapter->dev,
                         "Can not configure WOWLAN in disconnected state\n");
                return 0;
        }
 
-       if (wowlan->n_patterns || wowlan->magic_pkt) {
-               ret = mwifiex_set_mef_filter(priv, wowlan);
-               if (ret) {
-                       dev_err(adapter->dev, "Failed to set MEF filter\n");
-                       return ret;
-               }
+       ret = mwifiex_set_mef_filter(priv, wowlan);
+       if (ret) {
+               dev_err(adapter->dev, "Failed to set MEF filter\n");
+               return ret;
        }
 
        if (wowlan->disconnect) {
index cf2fa110e2514f4e56798768463d57970c1481d6..38f24e0427d28b02a979eaec1ce066e648829048 100644 (file)
@@ -83,6 +83,7 @@
 #define MWIFIEX_BUF_FLAG_TDLS_PKT         BIT(2)
 #define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS   BIT(3)
 #define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS  BIT(4)
+#define MWIFIEX_BUF_FLAG_AGGR_PKT          BIT(5)
 
 #define MWIFIEX_BRIDGED_PKTS_THR_HIGH      1024
 #define MWIFIEX_BRIDGED_PKTS_THR_LOW        128
 
 #define MWIFIEX_A_BAND_START_FREQ      5000
 
+/* SDIO Aggr data packet special info */
+#define SDIO_MAX_AGGR_BUF_SIZE         (256 * 255)
+#define BLOCK_NUMBER_OFFSET            15
+#define SDIO_HEADER_OFFSET             28
+
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
@@ -168,10 +174,11 @@ struct mwifiex_wait_queue {
 };
 
 struct mwifiex_rxinfo {
+       struct sk_buff *parent;
        u8 bss_num;
        u8 bss_type;
-       struct sk_buff *parent;
        u8 use_count;
+       u8 buf_type;
 };
 
 struct mwifiex_txinfo {
@@ -179,6 +186,7 @@ struct mwifiex_txinfo {
        u8 flags;
        u8 bss_num;
        u8 bss_type;
+       u8 aggr_num;
        u32 pkt_len;
        u8 ack_frame_id;
        u64 cookie;
index df553e86a0ad3bea74fc4ccee87db7c6e3d77e53..59d8964dd0dcaaadc39d0c09f872fe46c5488c4d 100644 (file)
@@ -197,6 +197,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP     (IEEE80211_HT_CAP_DSSSCCK40 | \
                                 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
@@ -353,6 +354,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
 #define HostCmd_CMD_TDLS_OPER                         0x0122
+#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 
 #define PROTOCOL_NO_SECURITY        0x01
 #define PROTOCOL_STATIC_WEP         0x02
@@ -523,9 +525,11 @@ enum P2P_MODES {
 #define TYPE_OR                                (MAX_OPERAND+5)
 #define MEF_MODE_HOST_SLEEP                    1
 #define MEF_ACTION_ALLOW_AND_WAKEUP_HOST       3
+#define MEF_ACTION_AUTO_ARP                    0x10
 #define MWIFIEX_CRITERIA_BROADCAST     BIT(0)
 #define MWIFIEX_CRITERIA_UNICAST       BIT(1)
 #define MWIFIEX_CRITERIA_MULTICAST     BIT(3)
+#define MWIFIEX_MAX_SUPPORTED_IPADDR              4
 
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
@@ -1240,6 +1244,12 @@ struct host_cmd_ds_chan_rpt_event {
        u8 tlvbuf[0];
 } __packed;
 
+struct host_cmd_sdio_sp_rx_aggr_cfg {
+       u8 action;
+       u8 enable;
+       __le16 block_size;
+} __packed;
+
 struct mwifiex_fixed_bcn_param {
        __le64 timestamp;
        __le16 beacon_period;
@@ -1962,6 +1972,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_coalesce_cfg coalesce_cfg;
                struct host_cmd_ds_tdls_oper tdls_oper;
                struct host_cmd_ds_chan_rpt_req chan_rpt_req;
+               struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
        } params;
 } __packed;
 
index 0153ce6d5879bacd48d6d42aaca3d0ddec5412cb..e12192f5cfad306b8cd9d4e5fce7ec2bd2e67957 100644 (file)
@@ -266,18 +266,15 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 
        mwifiex_wmm_init(adapter);
 
-       if (adapter->sleep_cfm) {
-               sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
-                                               adapter->sleep_cfm->data;
-               memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
-               sleep_cfm_buf->command =
-                               cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
-               sleep_cfm_buf->size =
-                               cpu_to_le16(adapter->sleep_cfm->len);
-               sleep_cfm_buf->result = 0;
-               sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
-               sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
-       }
+       sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+                                       adapter->sleep_cfm->data;
+       memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
+       sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
+       sleep_cfm_buf->size = cpu_to_le16(adapter->sleep_cfm->len);
+       sleep_cfm_buf->result = 0;
+       sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
+       sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
+
        memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params));
        memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period));
        adapter->tx_lock_flag = false;
@@ -481,6 +478,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
        spin_lock_init(&adapter->rx_proc_lock);
 
        skb_queue_head_init(&adapter->rx_data_q);
+       skb_queue_head_init(&adapter->tx_data_q);
 
        for (i = 0; i < adapter->priv_num; ++i) {
                INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
@@ -688,6 +686,10 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
                }
        }
 
+       atomic_set(&adapter->tx_queued, 0);
+       while ((skb = skb_dequeue(&adapter->tx_data_q)))
+               mwifiex_write_data_complete(adapter, skb, 0, 0);
+
        spin_lock_irqsave(&adapter->rx_proc_lock, flags);
 
        while ((skb = skb_dequeue(&adapter->rx_data_q))) {
index d73a9217b9da66abe59a97df5409a99946325b17..03a95c7d34bf9ef1e12524d836c3adb3eab8e8fd 100644 (file)
@@ -131,10 +131,39 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+void mwifiex_queue_main_work(struct mwifiex_adapter *adapter)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       if (adapter->mwifiex_processing) {
+               adapter->more_task_flag = true;
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+               queue_work(adapter->workqueue, &adapter->main_work);
+       }
+}
+EXPORT_SYMBOL_GPL(mwifiex_queue_main_work);
+
+static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+       if (adapter->rx_processing) {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+               queue_work(adapter->rx_workqueue, &adapter->rx_work);
+       }
+}
+
 static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
 {
        unsigned long flags;
        struct sk_buff *skb;
+       struct mwifiex_rxinfo *rx_info;
 
        spin_lock_irqsave(&adapter->rx_proc_lock, flags);
        if (adapter->rx_processing || adapter->rx_locked) {
@@ -154,9 +183,16 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
                        if (adapter->if_ops.submit_rem_rx_urbs)
                                adapter->if_ops.submit_rem_rx_urbs(adapter);
                        adapter->delay_main_work = false;
-                       queue_work(adapter->workqueue, &adapter->main_work);
+                       mwifiex_queue_main_work(adapter);
+               }
+               rx_info = MWIFIEX_SKB_RXCB(skb);
+               if (rx_info->buf_type == MWIFIEX_TYPE_AGGR_DATA) {
+                       if (adapter->if_ops.deaggr_pkt)
+                               adapter->if_ops.deaggr_pkt(adapter, skb);
+                       dev_kfree_skb_any(skb);
+               } else {
+                       mwifiex_handle_rx_packet(adapter, skb);
                }
-               mwifiex_handle_rx_packet(adapter, skb);
        }
        spin_lock_irqsave(&adapter->rx_proc_lock, flags);
        adapter->rx_processing = false;
@@ -189,7 +225,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
        spin_lock_irqsave(&adapter->main_proc_lock, flags);
 
        /* Check if already processing */
-       if (adapter->mwifiex_processing) {
+       if (adapter->mwifiex_processing || adapter->main_locked) {
                adapter->more_task_flag = true;
                spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto exit_main_proc;
@@ -214,9 +250,7 @@ process_start:
                if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING &&
                    adapter->iface_type != MWIFIEX_USB) {
                        adapter->delay_main_work = true;
-                       if (!adapter->rx_processing)
-                               queue_work(adapter->rx_workqueue,
-                                          &adapter->rx_work);
+                       mwifiex_queue_rx_work(adapter);
                        break;
                }
 
@@ -229,13 +263,14 @@ process_start:
                }
 
                if (adapter->rx_work_enabled && adapter->data_received)
-                       queue_work(adapter->rx_workqueue, &adapter->rx_work);
+                       mwifiex_queue_rx_work(adapter);
 
                /* Need to wake up the card ? */
                if ((adapter->ps_state == PS_STATE_SLEEP) &&
                    (adapter->pm_wakeup_card_req &&
                     !adapter->pm_wakeup_fw_try) &&
                    (is_command_pending(adapter) ||
+                    !skb_queue_empty(&adapter->tx_data_q) ||
                     !mwifiex_wmm_lists_empty(adapter))) {
                        adapter->pm_wakeup_fw_try = true;
                        mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -247,7 +282,7 @@ process_start:
                if (IS_CARD_RX_RCVD(adapter)) {
                        adapter->data_received = false;
                        adapter->pm_wakeup_fw_try = false;
-                       del_timer_sync(&adapter->wakeup_timer);
+                       del_timer(&adapter->wakeup_timer);
                        if (adapter->ps_state == PS_STATE_SLEEP)
                                adapter->ps_state = PS_STATE_AWAKE;
                } else {
@@ -260,7 +295,8 @@ process_start:
 
                        if ((!adapter->scan_chan_gap_enabled &&
                             adapter->scan_processing) || adapter->data_sent ||
-                           mwifiex_wmm_lists_empty(adapter)) {
+                           (mwifiex_wmm_lists_empty(adapter) &&
+                            skb_queue_empty(&adapter->tx_data_q))) {
                                if (adapter->cmd_sent || adapter->curr_cmd ||
                                    (!is_command_pending(adapter)))
                                        break;
@@ -310,6 +346,20 @@ process_start:
                        }
                }
 
+               if ((adapter->scan_chan_gap_enabled ||
+                    !adapter->scan_processing) &&
+                   !adapter->data_sent &&
+                   !skb_queue_empty(&adapter->tx_data_q)) {
+                       mwifiex_process_tx_queue(adapter);
+                       if (adapter->hs_activated) {
+                               adapter->is_hs_configured = false;
+                               mwifiex_hs_activated_event
+                                       (mwifiex_get_priv
+                                       (adapter, MWIFIEX_BSS_ROLE_ANY),
+                                       false);
+                       }
+               }
+
                if ((adapter->scan_chan_gap_enabled ||
                     !adapter->scan_processing) &&
                    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
@@ -325,7 +375,8 @@ process_start:
 
                if (adapter->delay_null_pkt && !adapter->cmd_sent &&
                    !adapter->curr_cmd && !is_command_pending(adapter) &&
-                   mwifiex_wmm_lists_empty(adapter)) {
+                   (mwifiex_wmm_lists_empty(adapter) &&
+                    skb_queue_empty(&adapter->tx_data_q))) {
                        if (!mwifiex_send_null_packet
                            (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
                             MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
@@ -606,7 +657,7 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
        atomic_inc(&priv->adapter->tx_pending);
        mwifiex_wmm_add_buf_txqueue(priv, skb);
 
-       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+       mwifiex_queue_main_work(priv->adapter);
 
        return 0;
 }
@@ -1098,9 +1149,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
                INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
        }
 
-       if (adapter->if_ops.iface_work)
-               INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work);
-
        /* Register the device. Fill up the private data structure with relevant
           information from the card. */
        if (adapter->if_ops.register_dev(adapter)) {
index ad8db61aeeeff032d4a9dcf209cdcded063510c0..fe1256044a6c9bca9b9e8475cf8a0487ca7cd0b8 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/ctype.h>
 #include <linux/of.h>
 #include <linux/idr.h>
+#include <linux/inetdevice.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -58,6 +59,8 @@ enum {
 
 #define MWIFIEX_MAX_AP                         64
 
+#define MWIFIEX_MAX_PKTS_TXQ                   16
+
 #define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT       (5 * HZ)
 
 #define MWIFIEX_TIMER_10S                      10000
@@ -118,6 +121,7 @@ enum {
 
 #define MWIFIEX_TYPE_CMD                               1
 #define MWIFIEX_TYPE_DATA                              0
+#define MWIFIEX_TYPE_AGGR_DATA                         10
 #define MWIFIEX_TYPE_EVENT                             3
 
 #define MAX_BITMAP_RATES_SIZE                  18
@@ -210,6 +214,12 @@ struct mwifiex_tx_aggr {
        u8 amsdu;
 };
 
+enum mwifiex_ba_status {
+       BA_SETUP_NONE = 0,
+       BA_SETUP_INPROGRESS,
+       BA_SETUP_COMPLETE
+};
+
 struct mwifiex_ra_list_tbl {
        struct list_head list;
        struct sk_buff_head skb_head;
@@ -218,6 +228,8 @@ struct mwifiex_ra_list_tbl {
        u16 max_amsdu;
        u16 ba_pkt_count;
        u8 ba_packet_thr;
+       enum mwifiex_ba_status ba_status;
+       u8 amsdu_in_ampdu;
        u16 total_pkt_count;
        bool tdls_link;
 };
@@ -601,11 +613,6 @@ struct mwifiex_private {
        struct mwifiex_11h_intf_state state_11h;
 };
 
-enum mwifiex_ba_status {
-       BA_SETUP_NONE = 0,
-       BA_SETUP_INPROGRESS,
-       BA_SETUP_COMPLETE
-};
 
 struct mwifiex_tx_ba_stream_tbl {
        struct list_head list;
@@ -738,6 +745,7 @@ struct mwifiex_if_ops {
        int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
        void (*iface_work)(struct work_struct *work);
        void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
+       void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
 };
 
 struct mwifiex_adapter {
@@ -771,6 +779,7 @@ struct mwifiex_adapter {
        bool rx_processing;
        bool delay_main_work;
        bool rx_locked;
+       bool main_locked;
        struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
        /* spin lock for init/shutdown */
        spinlock_t mwifiex_lock;
@@ -780,6 +789,8 @@ struct mwifiex_adapter {
        u8 more_task_flag;
        u16 tx_buf_size;
        u16 curr_tx_buf_size;
+       bool sdio_rx_aggr_enable;
+       u16 sdio_rx_block_size;
        u32 ioport;
        enum MWIFIEX_HARDWARE_STATUS hw_status;
        u16 number_of_antenna;
@@ -814,6 +825,8 @@ struct mwifiex_adapter {
        spinlock_t scan_pending_q_lock;
        /* spin lock for RX processing routine */
        spinlock_t rx_proc_lock;
+       struct sk_buff_head tx_data_q;
+       atomic_t tx_queued;
        u32 scan_processing;
        u16 region_code;
        struct mwifiex_802_11d_domain_reg domain_reg;
@@ -885,8 +898,6 @@ struct mwifiex_adapter {
        bool ext_scan;
        u8 fw_api_ver;
        u8 key_api_major_ver, key_api_minor_ver;
-       struct work_struct iface_work;
-       unsigned long iface_work_flags;
        struct memory_type_mapping *mem_type_mapping_tbl;
        u8 num_mem_types;
        u8 curr_mem_idx;
@@ -900,6 +911,8 @@ struct mwifiex_adapter {
        bool auto_tdls;
 };
 
+void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
+
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
 
 void mwifiex_set_trans_start(struct net_device *dev);
@@ -1422,7 +1435,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
                            u8 rx_rate, u8 ht_info);
 
 void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
-void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags);
+void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
+void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
index 4b463c3b99064904ba15c8503cc6f922cfe345ef..bcc7751d883c3773b558ef8c8b8bb8c3a8a3c474 100644 (file)
@@ -234,8 +234,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       cancel_work_sync(&adapter->iface_work);
-
        if (user_rmmod) {
 #ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
@@ -498,8 +496,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
 
        for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
                /* Allocate skb here so that firmware can DMA data from it */
-               skb = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE,
-                                          GFP_KERNEL | GFP_DMA);
+               skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+                                                 GFP_KERNEL | GFP_DMA);
                if (!skb) {
                        dev_err(adapter->dev,
                                "Unable to allocate skb for RX ring.\n");
@@ -1298,8 +1296,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                        }
                }
 
-               skb_tmp = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE,
-                                              GFP_KERNEL | GFP_DMA);
+               skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+                                                     GFP_KERNEL | GFP_DMA);
                if (!skb_tmp) {
                        dev_err(adapter->dev,
                                "Unable to allocate skb.\n");
@@ -2101,7 +2099,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
                goto exit;
 
        mwifiex_interrupt_status(adapter);
-       queue_work(adapter->workqueue, &adapter->main_work);
+       mwifiex_queue_main_work(adapter);
 
 exit:
        return IRQ_HANDLED;
@@ -2373,25 +2371,26 @@ done:
        adapter->curr_mem_idx = 0;
 }
 
+static unsigned long iface_work_flags;
+static struct mwifiex_adapter *save_adapter;
 static void mwifiex_pcie_work(struct work_struct *work)
 {
-       struct mwifiex_adapter *adapter =
-                       container_of(work, struct mwifiex_adapter, iface_work);
-
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
-                              &adapter->iface_work_flags))
-               mwifiex_pcie_fw_dump_work(adapter);
+                              &iface_work_flags))
+               mwifiex_pcie_fw_dump_work(save_adapter);
 }
 
+static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
 /* This function dumps FW information */
 static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 {
-       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+       save_adapter = adapter;
+       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
 
-       schedule_work(&adapter->iface_work);
+       schedule_work(&pcie_work);
 }
 
 /*
@@ -2619,7 +2618,6 @@ static struct mwifiex_if_ops pcie_ops = {
        .init_fw_port =                 mwifiex_pcie_init_fw_port,
        .clean_pcie_ring =              mwifiex_clean_pcie_ring_buf,
        .fw_dump =                      mwifiex_pcie_fw_dump,
-       .iface_work =                   mwifiex_pcie_work,
 };
 
 /*
@@ -2665,6 +2663,7 @@ static void mwifiex_pcie_cleanup_module(void)
        /* Set the flag as user is removing this module. */
        user_rmmod = 1;
 
+       cancel_work_sync(&pcie_work);
        pci_unregister_driver(&mwifiex_pcie);
 }
 
index 57d85ab442bf3f2569d7ffd434a2b7e45bb314d8..d10320f89bc16f0f87604fa3840a4a0b72e2530d 100644 (file)
@@ -47,6 +47,7 @@
 static u8 user_rmmod;
 
 static struct mwifiex_if_ops sdio_ops;
+static unsigned long iface_work_flags;
 
 static struct semaphore add_remove_card_sem;
 
@@ -200,8 +201,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
-       cancel_work_sync(&adapter->iface_work);
-
        if (user_rmmod) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
@@ -1042,6 +1041,59 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
        return ret;
 }
 
+/*
+ * This function decodes an SDIO aggregation packet.
+ *
+ * Based on the data block size and pkt_len,
+ * the skb data is split into individual packets.
+ */
+static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
+                                   struct sk_buff *skb)
+{
+       u32 total_pkt_len, pkt_len;
+       struct sk_buff *skb_deaggr;
+       u32 pkt_type;
+       u16 blk_size;
+       u8 blk_num;
+       u8 *data;
+
+       data = skb->data;
+       total_pkt_len = skb->len;
+
+       while (total_pkt_len >= (SDIO_HEADER_OFFSET + INTF_HEADER_LEN)) {
+               if (total_pkt_len < adapter->sdio_rx_block_size)
+                       break;
+               blk_num = *(data + BLOCK_NUMBER_OFFSET);
+               blk_size = adapter->sdio_rx_block_size * blk_num;
+               if (blk_size > total_pkt_len) {
+                       dev_err(adapter->dev, "%s: error in pkt,\t"
+                               "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+                               __func__, blk_num, blk_size, total_pkt_len);
+                       break;
+               }
+               pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
+               pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
+                                        2));
+               if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
+                       dev_err(adapter->dev, "%s: error in pkt,\t"
+                               "pkt_len=%d, blk_size=%d\n",
+                               __func__, pkt_len, blk_size);
+                       break;
+               }
+               skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
+                                                        GFP_KERNEL | GFP_DMA);
+               if (!skb_deaggr)
+                       break;
+               skb_put(skb_deaggr, pkt_len);
+               memcpy(skb_deaggr->data, data + SDIO_HEADER_OFFSET, pkt_len);
+               skb_pull(skb_deaggr, INTF_HEADER_LEN);
+
+               mwifiex_handle_rx_packet(adapter, skb_deaggr);
+               data += blk_size;
+               total_pkt_len -= blk_size;
+       }
+}
+
 /*
  * This function decodes a received packet.
  *
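
The new mwifiex_deaggr_sdio_pkt() above walks a single DMA buffer that carries several block-aligned packets, reading a block count and a little-endian length for each one. A minimal userspace sketch of the same walk follows; BLK_SIZE, HDR_OFF and BLK_NUM_OFF are assumed stand-ins for the driver's sdio_rx_block_size, SDIO_HEADER_OFFSET and BLOCK_NUMBER_OFFSET, and the printf stands in for mwifiex_handle_rx_packet().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BLK_SIZE    256	/* assumed sdio_rx_block_size */
#define HDR_OFF     4	/* assumed SDIO_HEADER_OFFSET */
#define BLK_NUM_OFF 0	/* assumed BLOCK_NUMBER_OFFSET */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Walk an aggregated buffer; each packet occupies blk_num whole blocks. */
static void deaggr(const uint8_t *buf, size_t total_len)
{
	size_t off = 0;

	while (total_len - off >= BLK_SIZE) {
		uint8_t blk_num = buf[off + BLK_NUM_OFF];
		size_t blk_size = (size_t)blk_num * BLK_SIZE;
		uint16_t pkt_len = get_le16(buf + off + HDR_OFF);

		if (!blk_num || blk_size > total_len - off ||
		    (size_t)pkt_len + HDR_OFF > blk_size)
			break;	/* malformed aggregate: stop, as the driver does */

		printf("packet of %u bytes at offset %zu\n", pkt_len, off + HDR_OFF);
		off += blk_size;	/* next packet starts on a block boundary */
	}
}

int main(void)
{
	uint8_t buf[2 * BLK_SIZE] = { 0 };

	buf[BLK_NUM_OFF] = 2;		/* this packet spans two blocks */
	buf[HDR_OFF] = 0x2c;		/* pkt_len = 300, little-endian */
	buf[HDR_OFF + 1] = 0x01;
	deaggr(buf, sizeof(buf));
	return 0;
}
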
@@ -1055,11 +1107,28 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
        u8 *cmd_buf;
        __le16 *curr_ptr = (__le16 *)skb->data;
        u16 pkt_len = le16_to_cpu(*curr_ptr);
+       struct mwifiex_rxinfo *rx_info;
 
-       skb_trim(skb, pkt_len);
-       skb_pull(skb, INTF_HEADER_LEN);
+       if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
+               skb_trim(skb, pkt_len);
+               skb_pull(skb, INTF_HEADER_LEN);
+       }
 
        switch (upld_typ) {
+       case MWIFIEX_TYPE_AGGR_DATA:
+               dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+               rx_info = MWIFIEX_SKB_RXCB(skb);
+               rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
+               if (adapter->rx_work_enabled) {
+                       skb_queue_tail(&adapter->rx_data_q, skb);
+                       atomic_inc(&adapter->rx_pending);
+                       adapter->data_received = true;
+               } else {
+                       mwifiex_deaggr_sdio_pkt(adapter, skb);
+                       dev_kfree_skb_any(skb);
+               }
+               break;
+
        case MWIFIEX_TYPE_DATA:
                dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
                if (adapter->rx_work_enabled) {
@@ -1127,17 +1196,17 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
  * provided there is space left, processed and finally uploaded.
  */
 static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
-                                            struct sk_buff *skb, u8 port)
+                                            u16 rx_len, u8 port)
 {
        struct sdio_mmc_card *card = adapter->card;
        s32 f_do_rx_aggr = 0;
        s32 f_do_rx_cur = 0;
        s32 f_aggr_cur = 0;
+       s32 f_post_aggr_cur = 0;
        struct sk_buff *skb_deaggr;
-       u32 pind;
-       u32 pkt_len, pkt_type, mport;
+       struct sk_buff *skb = NULL;
+       u32 pkt_len, pkt_type, mport, pind;
        u8 *curr_ptr;
-       u32 rx_len = skb->len;
 
        if ((card->has_control_mask) && (port == CTRL_PORT)) {
                /* Read the command Resp without aggr */
@@ -1164,12 +1233,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
 
                if (MP_RX_AGGR_IN_PROGRESS(card)) {
-                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
+                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
                                f_aggr_cur = 1;
                        } else {
                                /* No room in Aggr buf, do rx aggr now */
                                f_do_rx_aggr = 1;
-                               f_do_rx_cur = 1;
+                               f_post_aggr_cur = 1;
                        }
                } else {
                        /* Rx aggr not in progress */
@@ -1182,7 +1251,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
                if (MP_RX_AGGR_IN_PROGRESS(card)) {
                        f_do_rx_aggr = 1;
-                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
+                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len))
                                f_aggr_cur = 1;
                        else
                                /* No room in Aggr buf, do rx aggr now */
@@ -1195,7 +1264,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
        if (f_aggr_cur) {
                dev_dbg(adapter->dev, "info: current packet aggregation\n");
                /* Curr pkt can be aggregated */
-               mp_rx_aggr_setup(card, skb, port);
+               mp_rx_aggr_setup(card, rx_len, port);
 
                if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
                    mp_rx_aggr_port_limit_reached(card)) {
@@ -1238,16 +1307,29 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                curr_ptr = card->mpa_rx.buf;
 
                for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+                       u32 *len_arr = card->mpa_rx.len_arr;
 
                        /* get curr PKT len & type */
                        pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
                        pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
 
                        /* copy pkt to deaggr buf */
-                       skb_deaggr = card->mpa_rx.skb_arr[pind];
+                       skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
+                                                                GFP_KERNEL |
+                                                                GFP_DMA);
+                       if (!skb_deaggr) {
+                               dev_err(adapter->dev, "skb allocation failure, drop pkt len=%d type=%d\n",
+                                       pkt_len, pkt_type);
+                               curr_ptr += len_arr[pind];
+                               continue;
+                       }
 
-                       if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
-                                        card->mpa_rx.len_arr[pind])) {
+                       skb_put(skb_deaggr, len_arr[pind]);
+
+                       if ((pkt_type == MWIFIEX_TYPE_DATA ||
+                            (pkt_type == MWIFIEX_TYPE_AGGR_DATA &&
+                             adapter->sdio_rx_aggr_enable)) &&
+                           (pkt_len <= len_arr[pind])) {
 
                                memcpy(skb_deaggr->data, curr_ptr, pkt_len);
 
@@ -1257,13 +1339,15 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                                mwifiex_decode_rx_packet(adapter, skb_deaggr,
                                                         pkt_type);
                        } else {
-                               dev_err(adapter->dev, "wrong aggr pkt:"
-                                       " type=%d len=%d max_len=%d\n",
+                               dev_err(adapter->dev, " drop wrong aggr pkt:\t"
+                                       "sdio_single_port_rx_aggr=%d\t"
+                                       "type=%d len=%d max_len=%d\n",
+                                       adapter->sdio_rx_aggr_enable,
                                        pkt_type, pkt_len,
-                                       card->mpa_rx.len_arr[pind]);
+                                       len_arr[pind]);
                                dev_kfree_skb_any(skb_deaggr);
                        }
-                       curr_ptr += card->mpa_rx.len_arr[pind];
+                       curr_ptr += len_arr[pind];
                }
                MP_RX_AGGR_BUF_RESET(card);
        }
@@ -1273,28 +1357,46 @@ rx_curr_single:
                dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
                        port, rx_len);
 
+               skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+               if (!skb) {
+                       dev_err(adapter->dev, "single skb allocation failed,\t"
+                               "drop pkt port=%d len=%d\n", port, rx_len);
+                       if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
+                                                     card->mpa_rx.buf, rx_len,
+                                                     adapter->ioport + port))
+                               goto error;
+                       return 0;
+               }
+
+               skb_put(skb, rx_len);
+
                if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
                                              skb->data, skb->len,
                                              adapter->ioport + port))
                        goto error;
+               if (!adapter->sdio_rx_aggr_enable &&
+                   pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
+                       dev_err(adapter->dev, "drop wrong pkt type %d:\t"
+                               "SDIO RX aggregation not enabled\n",
+                               pkt_type);
+                       dev_kfree_skb_any(skb);
+                       return 0;
+               }
 
                mwifiex_decode_rx_packet(adapter, skb, pkt_type);
        }
+       if (f_post_aggr_cur) {
+               dev_dbg(adapter->dev, "info: current packet aggregation\n");
+               /* Curr pkt can be aggregated */
+               mp_rx_aggr_setup(card, rx_len, port);
+       }
 
        return 0;
-
 error:
-       if (MP_RX_AGGR_IN_PROGRESS(card)) {
-               /* Multiport-aggregation transfer failed - cleanup */
-               for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
-                       /* copy pkt to deaggr buf */
-                       skb_deaggr = card->mpa_rx.skb_arr[pind];
-                       dev_kfree_skb_any(skb_deaggr);
-               }
+       if (MP_RX_AGGR_IN_PROGRESS(card))
                MP_RX_AGGR_BUF_RESET(card);
-       }
 
-       if (f_do_rx_cur)
+       if (f_do_rx_cur && skb)
                /* Single transfer pending. Free curr buff also */
                dev_kfree_skb_any(skb);
 
@@ -1356,8 +1458,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                     MWIFIEX_RX_DATA_BUF_SIZE)
                        return -1;
                rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+               dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
 
-               skb = mwifiex_alloc_rx_buf(rx_len, GFP_KERNEL | GFP_DMA);
+               skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
                if (!skb)
                        return -1;
 
@@ -1447,28 +1550,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                                 1) / MWIFIEX_SDIO_BLOCK_SIZE;
                        if (rx_len <= INTF_HEADER_LEN ||
                            (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
-                            MWIFIEX_RX_DATA_BUF_SIZE) {
+                            card->mpa_rx.buf_size) {
                                dev_err(adapter->dev, "invalid rx_len=%d\n",
                                        rx_len);
                                return -1;
                        }
-                       rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-
-                       skb = mwifiex_alloc_rx_buf(rx_len,
-                                                  GFP_KERNEL | GFP_DMA);
-
-                       if (!skb) {
-                               dev_err(adapter->dev, "%s: failed to alloc skb",
-                                       __func__);
-                               return -1;
-                       }
 
-                       skb_put(skb, rx_len);
-
-                       dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
-                               rx_len, skb->len);
+                       rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+                       dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
 
-                       if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
+                       if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
                                                              port)) {
                                dev_err(adapter->dev, "card_to_host_mpa failed:"
                                        " int status=%#x\n", sdio_ireg);
@@ -1736,6 +1827,7 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
                                   u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
 {
        struct sdio_mmc_card *card = adapter->card;
+       u32 rx_buf_size;
        int ret = 0;
 
        card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
@@ -1746,13 +1838,15 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
 
        card->mpa_tx.buf_size = mpa_tx_buf_size;
 
-       card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
+       rx_buf_size = max_t(u32, mpa_rx_buf_size,
+                           (u32)SDIO_MAX_AGGR_BUF_SIZE);
+       card->mpa_rx.buf = kzalloc(rx_buf_size, GFP_KERNEL);
        if (!card->mpa_rx.buf) {
                ret = -1;
                goto error;
        }
 
-       card->mpa_rx.buf_size = mpa_rx_buf_size;
+       card->mpa_rx.buf_size = rx_buf_size;
 
 error:
        if (ret) {
@@ -1951,6 +2045,7 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
                port, card->mp_data_port_mask);
 }
 
+static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
@@ -2019,10 +2114,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
 }
 
 /* This function dump firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
+static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
 {
-       struct mwifiex_adapter *adapter =
-                       container_of(work, struct mwifiex_adapter, iface_work);
        struct sdio_mmc_card *card = adapter->card;
        int ret = 0;
        unsigned int reg, reg_start, reg_end;
@@ -2144,36 +2237,36 @@ done:
 
 static void mwifiex_sdio_work(struct work_struct *work)
 {
-       struct mwifiex_adapter *adapter =
-                       container_of(work, struct mwifiex_adapter, iface_work);
-
-       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
-                              &adapter->iface_work_flags))
-               mwifiex_sdio_card_reset_work(adapter);
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
-                              &adapter->iface_work_flags))
-               mwifiex_sdio_fw_dump_work(work);
+                              &iface_work_flags))
+               mwifiex_sdio_fw_dump_work(save_adapter);
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
+                              &iface_work_flags))
+               mwifiex_sdio_card_reset_work(save_adapter);
 }
 
+static DECLARE_WORK(sdio_work, mwifiex_sdio_work);
 /* This function resets the card */
 static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
 {
-       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags))
+       save_adapter = adapter;
+       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
 
-       schedule_work(&adapter->iface_work);
+       schedule_work(&sdio_work);
 }
 
 /* This function dumps FW information */
 static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
 {
-       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+       save_adapter = adapter;
+       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
-       schedule_work(&adapter->iface_work);
+       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+       schedule_work(&sdio_work);
 }
 
 /* Function to dump SDIO function registers and SDIO scratch registers in case
@@ -2289,9 +2382,9 @@ static struct mwifiex_if_ops sdio_ops = {
        .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
        .event_complete = mwifiex_sdio_event_complete,
        .card_reset = mwifiex_sdio_card_reset,
-       .iface_work = mwifiex_sdio_work,
        .fw_dump = mwifiex_sdio_fw_dump,
        .reg_dump = mwifiex_sdio_reg_dump,
+       .deaggr_pkt = mwifiex_deaggr_sdio_pkt,
 };
 
 /*
@@ -2328,6 +2421,7 @@ mwifiex_sdio_cleanup_module(void)
 
        /* Set the flag as user is removing this module. */
        user_rmmod = 1;
+       cancel_work_sync(&sdio_work);
 
        sdio_unregister_driver(&mwifiex_sdio);
 }
index c636944c77bcdc935bbbbf07e5f87e37e318bf43..6f645cf47369baddaa10bc6489837fa914a14a57 100644 (file)
@@ -67,6 +67,8 @@
 
 #define MWIFIEX_MP_AGGR_BUF_SIZE_16K   (16384)
 #define MWIFIEX_MP_AGGR_BUF_SIZE_32K   (32768)
+/* We leave one block of 256 bytes for DMA alignment */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_MAX    (65280)
 
 /* Misc. Config Register : Auto Re-enable interrupts */
 #define AUTO_RE_ENABLE_INT              BIT(4)
@@ -458,8 +460,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .max_ports = 32,
        .mp_agg_pkt_limit = 16,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
        .supports_sdio_new_mode = true,
        .has_control_mask = false,
        .can_dump_fw = true,
@@ -571,9 +573,9 @@ mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
 
 /* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
 static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
-                                   struct sk_buff *skb, u8 port)
+                                   u16 rx_len, u8 port)
 {
-       card->mpa_rx.buf_len += skb->len;
+       card->mpa_rx.buf_len += rx_len;
 
        if (!card->mpa_rx.pkt_cnt)
                card->mpa_rx.start_port = port;
@@ -586,8 +588,8 @@ static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
                else
                        card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
        }
-       card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb;
-       card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len;
+       card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL;
+       card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len;
        card->mpa_rx.pkt_cnt++;
 }
 #endif /* _MWIFIEX_SDIO_H */
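
With single-port RX aggregation, mp_rx_aggr_setup() above no longer stores pre-allocated skbs; it only records each packet's length and grows the total transfer size, and DMA-aligned skbs are allocated later when the aggregate is split. A hedged sketch of that bookkeeping, using a hypothetical struct in place of the driver's mpa_rx state:

#include <stdint.h>
#include <stdio.h>

#define MAX_AGGR_PKTS 16	/* assumed aggregation packet limit */

/* Hypothetical stand-in for the driver's mpa_rx bookkeeping. */
struct rx_aggr {
	uint32_t buf_len;			/* total bytes for the single bulk read */
	uint16_t len_arr[MAX_AGGR_PKTS];	/* per-packet lengths, used to split later */
	uint8_t  start_port;
	int      pkt_cnt;
};

/* Record one more packet; the caller is assumed to enforce MAX_AGGR_PKTS. */
static void rx_aggr_setup(struct rx_aggr *a, uint16_t rx_len, uint8_t port)
{
	if (!a->pkt_cnt)
		a->start_port = port;
	a->buf_len += rx_len;			/* grow the aggregate transfer */
	a->len_arr[a->pkt_cnt++] = rx_len;	/* remember where to cut later */
}

int main(void)
{
	struct rx_aggr a = { 0 };

	rx_aggr_setup(&a, 512, 3);
	rx_aggr_setup(&a, 256, 4);
	printf("%d pkts, %u bytes, start port %u\n", a.pkt_cnt, a.buf_len, a.start_port);
	return 0;
}
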
index f7d204ffd6e97c2444129252535e9fd3580959cc..49422f2a53809fe0c241de93afb231c8011871c3 100644 (file)
@@ -1370,22 +1370,29 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
                    struct mwifiex_ds_mef_cfg *mef)
 {
        struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
+       struct mwifiex_fw_mef_entry *mef_entry = NULL;
        u8 *pos = (u8 *)mef_cfg;
+       u16 i;
 
        cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
 
        mef_cfg->criteria = cpu_to_le32(mef->criteria);
        mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
        pos += sizeof(*mef_cfg);
-       mef_cfg->mef_entry->mode = mef->mef_entry->mode;
-       mef_cfg->mef_entry->action = mef->mef_entry->action;
-       pos += sizeof(*(mef_cfg->mef_entry));
 
-       if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos))
-               return -1;
+       for (i = 0; i < mef->num_entries; i++) {
+               mef_entry = (struct mwifiex_fw_mef_entry *)pos;
+               mef_entry->mode = mef->mef_entry[i].mode;
+               mef_entry->action = mef->mef_entry[i].action;
+               pos += sizeof(*mef_cfg->mef_entry);
+
+               if (mwifiex_cmd_append_rpn_expression(priv,
+                                                     &mef->mef_entry[i], &pos))
+                       return -1;
 
-       mef_cfg->mef_entry->exprsize =
-                       cpu_to_le16(pos - mef_cfg->mef_entry->expr);
+               mef_entry->exprsize =
+                       cpu_to_le16(pos - mef_entry->expr);
+       }
        cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
 
        return 0;
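
The reworked mwifiex_cmd_mef_cfg() above packs every MEF entry, not just the first, into one flat command buffer: a cursor advances past each fixed header, the variable-length RPN expression is appended behind it, and exprsize is filled in once the expression length is known. A sketch of that packing pattern with an invented (not the driver's) entry layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical entry: a 4-byte header (mode, action, 16-bit exprsize) followed
 * by a variable-length expression. Not the driver's real wire format. */
struct entry_src {
	uint8_t mode, action;
	const uint8_t *expr;
	size_t expr_len;
};

/* Serialize all entries into buf; returns the number of bytes used. */
static size_t pack_entries(uint8_t *buf, const struct entry_src *src, int n)
{
	uint8_t *pos = buf;
	int i;

	for (i = 0; i < n; i++) {
		uint8_t *hdr = pos;			/* this entry's header */

		hdr[0] = src[i].mode;
		hdr[1] = src[i].action;
		pos += 4;				/* skip header, exprsize still unknown */
		memcpy(pos, src[i].expr, src[i].expr_len);
		pos += src[i].expr_len;
		hdr[2] = (uint8_t)(src[i].expr_len & 0xff);	   /* exprsize, little-endian, */
		hdr[3] = (uint8_t)((src[i].expr_len >> 8) & 0xff); /* written once it is known */
	}
	return (size_t)(pos - buf);
}

int main(void)
{
	static const uint8_t e0[] = { 0x10, 0x20, 0x30 };
	static const uint8_t e1[] = { 0x40 };
	const struct entry_src src[] = { { 1, 2, e0, sizeof(e0) }, { 1, 3, e1, sizeof(e1) } };
	uint8_t buf[64];

	printf("packed %zu bytes\n", pack_entries(buf, src, 2));
	return 0;
}
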
@@ -1664,6 +1671,25 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
 
        return 0;
 }
+
+/* This function prepares the command for SDIO single-port RX aggregation. */
+static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
+                                       u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
+                                       &cmd->params.sdio_rx_aggr_cfg;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_SDIO_SP_RX_AGGR_CFG);
+       cmd->size =
+               cpu_to_le16(sizeof(struct host_cmd_sdio_sp_rx_aggr_cfg) +
+                           S_DS_GEN);
+       cfg->action = cmd_action;
+       if (cmd_action == HostCmd_ACT_GEN_SET)
+               cfg->enable = *(u8 *)data_buf;
+
+       return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1901,6 +1927,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
                                                            data_buf);
                break;
+       case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+               ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
+                                                  data_buf);
+               break;
        default:
                dev_err(priv->adapter->dev,
                        "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1940,6 +1970,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
        struct mwifiex_ds_auto_ds auto_ds;
        enum state_11d_t state_11d;
        struct mwifiex_ds_11n_tx_cfg tx_cfg;
+       u8 sdio_sp_rx_aggr_enable;
 
        if (first_sta) {
                if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -1983,6 +2014,22 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                if (ret)
                        return -1;
 
+               /** Set SDIO Single Port RX Aggr Info */
+               if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+                   ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) {
+                       sdio_sp_rx_aggr_enable = true;
+                       ret = mwifiex_send_cmd(priv,
+                                              HostCmd_CMD_SDIO_SP_RX_AGGR_CFG,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              &sdio_sp_rx_aggr_enable,
+                                              true);
+                       if (ret) {
+                               dev_err(priv->adapter->dev,
+                                       "error while enabling SP aggregation; disabling it\n");
+                               adapter->sdio_rx_aggr_enable = false;
+                       }
+               }
+
                /* Reconfigure tx buf size */
                ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
                                       HostCmd_ACT_GEN_SET, 0,
index 5f8da5924666275615482fd07de75fa46d4bfb4a..88dc6b672ef43adb5cc8c1b836b19a1bed0db5d1 100644 (file)
@@ -90,6 +90,10 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
        case HostCmd_CMD_MAC_CONTROL:
                break;
 
+       case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+               dev_err(priv->adapter->dev, "SDIO RX single-port aggregation not supported\n");
+               break;
+
        default:
                break;
        }
@@ -943,6 +947,20 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function handles the command response of SDIO RX aggregation. */
+static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv,
+                                       struct host_cmd_ds_command *resp)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
+                               &resp->params.sdio_rx_aggr_cfg;
+
+       adapter->sdio_rx_aggr_enable = cfg->enable;
+       adapter->sdio_rx_block_size = le16_to_cpu(cfg->block_size);
+
+       return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -1124,6 +1142,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_CHAN_REPORT_REQUEST:
                break;
+       case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+               ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
+               break;
        default:
                dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
                        resp->command);
index 64c4223a1e1ee919783493487f2c449d8c8693bd..0dc7a1d3993d325a15f84fa447afaa884349eabc 100644 (file)
@@ -312,7 +312,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                        adapter->ps_state = PS_STATE_AWAKE;
                                        adapter->pm_wakeup_card_req = false;
                                        adapter->pm_wakeup_fw_try = false;
-                                       del_timer_sync(&adapter->wakeup_timer);
+                                       del_timer(&adapter->wakeup_timer);
                                        break;
                                }
                                if (!mwifiex_send_null_packet
@@ -327,7 +327,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                adapter->ps_state = PS_STATE_AWAKE;
                adapter->pm_wakeup_card_req = false;
                adapter->pm_wakeup_fw_try = false;
-               del_timer_sync(&adapter->wakeup_timer);
+               del_timer(&adapter->wakeup_timer);
 
                break;
 
index ea4549f0e0b931b449c4a13879a81d3a4025c77e..a245f444aeec17e23c027b60d027239050fb4497 100644 (file)
@@ -92,6 +92,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
        else
                head_ptr = mwifiex_process_sta_txpd(priv, skb);
 
+       if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) {
+               skb_queue_tail(&adapter->tx_data_q, skb);
+               atomic_inc(&adapter->tx_queued);
+               return 0;
+       }
+
        if (head_ptr) {
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
                        local_tx_pd = (struct txpd *)(head_ptr + hroom);
@@ -142,6 +148,123 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
        return ret;
 }
 
+static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
+                               struct sk_buff *skb,
+                               struct mwifiex_tx_param *tx_param)
+{
+       struct txpd *local_tx_pd = NULL;
+       u8 *head_ptr = skb->data;
+       int ret = 0;
+       struct mwifiex_private *priv;
+       struct mwifiex_txinfo *tx_info;
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
+                                     tx_info->bss_type);
+       if (!priv) {
+               dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+               adapter->dbg.num_tx_host_to_card_failure++;
+               mwifiex_write_data_complete(adapter, skb, 0, 0);
+               return ret;
+       }
+       if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+               if (adapter->iface_type == MWIFIEX_USB)
+                       local_tx_pd = (struct txpd *)head_ptr;
+               else
+                       local_tx_pd = (struct txpd *) (head_ptr +
+                               INTF_HEADER_LEN);
+       }
+
+       if (adapter->iface_type == MWIFIEX_USB) {
+               adapter->data_sent = true;
+               ret = adapter->if_ops.host_to_card(adapter,
+                                                  MWIFIEX_USB_EP_DATA,
+                                                  skb, NULL);
+       } else {
+               ret = adapter->if_ops.host_to_card(adapter,
+                                                  MWIFIEX_TYPE_DATA,
+                                                  skb, tx_param);
+       }
+       switch (ret) {
+       case -ENOSR:
+               dev_err(adapter->dev, "data: -ENOSR is returned\n");
+               break;
+       case -EBUSY:
+               if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+                   (adapter->pps_uapsd_mode) &&
+                   (adapter->tx_lock_flag)) {
+                       priv->adapter->tx_lock_flag = false;
+                       if (local_tx_pd)
+                               local_tx_pd->flags = 0;
+               }
+               skb_queue_head(&adapter->tx_data_q, skb);
+               if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+                       atomic_add(tx_info->aggr_num, &adapter->tx_queued);
+               else
+                       atomic_inc(&adapter->tx_queued);
+               dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+               break;
+       case -1:
+               if (adapter->iface_type != MWIFIEX_PCIE)
+                       adapter->data_sent = false;
+               dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
+                       ret);
+               adapter->dbg.num_tx_host_to_card_failure++;
+               mwifiex_write_data_complete(adapter, skb, 0, ret);
+               break;
+       case -EINPROGRESS:
+               if (adapter->iface_type != MWIFIEX_PCIE)
+                       adapter->data_sent = false;
+               break;
+       case 0:
+               mwifiex_write_data_complete(adapter, skb, 0, ret);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+static int
+mwifiex_dequeue_tx_queue(struct mwifiex_adapter *adapter)
+{
+       struct sk_buff *skb, *skb_next;
+       struct mwifiex_txinfo *tx_info;
+       struct mwifiex_tx_param tx_param;
+
+       skb = skb_dequeue(&adapter->tx_data_q);
+       if (!skb)
+               return -1;
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+               atomic_sub(tx_info->aggr_num, &adapter->tx_queued);
+       else
+               atomic_dec(&adapter->tx_queued);
+
+       if (!skb_queue_empty(&adapter->tx_data_q))
+               skb_next = skb_peek(&adapter->tx_data_q);
+       else
+               skb_next = NULL;
+       tx_param.next_pkt_len = ((skb_next) ? skb_next->len : 0);
+       if (!tx_param.next_pkt_len) {
+               if (!mwifiex_wmm_lists_empty(adapter))
+                       tx_param.next_pkt_len = 1;
+       }
+       return mwifiex_host_to_card(adapter, skb, &tx_param);
+}
+
+void
+mwifiex_process_tx_queue(struct mwifiex_adapter *adapter)
+{
+       do {
+               if (adapter->data_sent || adapter->tx_lock_flag)
+                       break;
+               if (mwifiex_dequeue_tx_queue(adapter))
+                       break;
+       } while (!skb_queue_empty(&adapter->tx_data_q));
+}
+
 /*
  * Packet send completion callback handler.
  *
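
mwifiex_process_tx_queue() above drains adapter->tx_data_q while the interface is idle, and mwifiex_dequeue_tx_queue() peeks at the next frame so the bus layer can be told whether more data follows (next_pkt_len). A minimal userspace sketch of that pattern, with an invented queue type standing in for sk_buff_head:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's TX queue state. */
struct frame { int len; };

struct txq {
	struct frame *pkts;
	int head, count;
	bool busy;		/* mirrors data_sent / tx_lock_flag */
};

/* Send one frame; next_len hints whether more data follows immediately. */
static void send_frame(struct frame *f, int next_len)
{
	printf("tx %d bytes, next_pkt_len=%d\n", f->len, next_len);
}

static int dequeue_one(struct txq *q)
{
	struct frame *f, *next;

	if (!q->count)
		return -1;
	f = &q->pkts[q->head++];
	q->count--;
	next = q->count ? &q->pkts[q->head] : NULL;	/* peek, as the driver does */
	send_frame(f, next ? next->len : 0);
	return 0;
}

static void process_tx_queue(struct txq *q)
{
	/* Stop early if the interface is busy, otherwise drain the queue. */
	while (q->count) {
		if (q->busy)
			break;
		if (dequeue_one(q))
			break;
	}
}

int main(void)
{
	struct frame pkts[] = { { 1500 }, { 60 }, { 400 } };
	struct txq q = { pkts, 0, 3, false };

	process_tx_queue(&q);
	return 0;
}
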
@@ -179,8 +302,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                priv->stats.tx_errors++;
        }
 
-       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
                atomic_dec_return(&adapter->pending_bridged_pkts);
+               if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+                       goto done;
+       }
 
        if (aggr)
                /* For skb_aggr, do not wake up tx queue */
index 223873022ffe24120054cf13c8a8466c78bcdd0d..fd8027f200a0ddd61c178ee1f7107e7e0931c1c1 100644 (file)
@@ -193,7 +193,7 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
                dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
                        recv_length, status);
                if (status == -EINPROGRESS) {
-                       queue_work(adapter->workqueue, &adapter->main_work);
+                       mwifiex_queue_main_work(adapter);
 
                        /* urb for data_ep is re-submitted now;
                         * urb for cmd_ep will be re-submitted in callback
@@ -262,7 +262,7 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
                                            urb->status ? -1 : 0);
        }
 
-       queue_work(adapter->workqueue, &adapter->main_work);
+       mwifiex_queue_main_work(adapter);
 
        return;
 }
@@ -1006,7 +1006,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
        /* Simulation of HS_AWAKE event */
        adapter->pm_wakeup_fw_try = false;
-       del_timer_sync(&adapter->wakeup_timer);
+       del_timer(&adapter->wakeup_timer);
        adapter->pm_wakeup_card_req = false;
        adapter->ps_state = PS_STATE_AWAKE;
 
index 2148a573396b8dfcbcf2dba17843cace3af250a6..b8a45872354d7f46c330da734fa5f2aef8b4c7c0 100644 (file)
@@ -632,7 +632,7 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv)
                atomic_set(&phist_data->sig_str[ix], 0);
 }
 
-void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags)
+void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags)
 {
        struct sk_buff *skb;
        int buf_len, pad;
@@ -653,4 +653,4 @@ void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags)
 
        return skb;
 }
-EXPORT_SYMBOL_GPL(mwifiex_alloc_rx_buf);
+EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf);
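
The rename to mwifiex_alloc_dma_align_buf() reflects that the helper is a general DMA-aligned buffer allocator rather than something RX-specific: it over-allocates and shifts the data pointer onto an aligned boundary. A sketch of the alignment arithmetic only; DMA_ALIGN is an assumed value and plain malloc stands in for skb allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DMA_ALIGN 8	/* assumed alignment requirement */

struct buf {
	void *base;	/* what was actually allocated (kept so it can be freed) */
	uint8_t *data;	/* aligned payload pointer handed to the hardware */
};

/* Allocate len bytes plus headroom, then round data up to DMA_ALIGN. */
static int alloc_dma_align_buf(struct buf *b, size_t len)
{
	uintptr_t p;

	b->base = malloc(len + DMA_ALIGN - 1);
	if (!b->base)
		return -1;
	p = (uintptr_t)b->base;
	b->data = (uint8_t *)((p + DMA_ALIGN - 1) & ~((uintptr_t)DMA_ALIGN - 1));
	return 0;
}

int main(void)
{
	struct buf b;

	if (alloc_dma_align_buf(&b, 1500))
		return 1;
	printf("data %% %d = %lu\n", DMA_ALIGN,
	       (unsigned long)((uintptr_t)b.data % DMA_ALIGN));
	free(b.base);
	return 0;
}
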
index 0cd4f6bed9fc4f6535bdb5e8e02578db13d5bb68..b2e99569a0f8b659b756179a7cec340de68952e1 100644 (file)
@@ -157,6 +157,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
 
                ra_list->is_11n_enabled = 0;
                ra_list->tdls_link = false;
+               ra_list->ba_status = BA_SETUP_NONE;
+               ra_list->amsdu_in_ampdu = false;
                if (!mwifiex_queuing_ra_based(priv)) {
                        if (mwifiex_get_tdls_link_status(priv, ra) ==
                            TDLS_SETUP_COMPLETE) {
@@ -574,7 +576,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
  * This function retrieves a particular RA list node, matching with the
  * given TID and RA address.
  */
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
                            const u8 *ra_addr)
 {
@@ -942,14 +944,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
        struct mwifiex_ra_list_tbl *ptr;
        struct mwifiex_tid_tbl *tid_ptr;
        atomic_t *hqp;
-       unsigned long flags_bss, flags_ra;
+       unsigned long flags_ra;
        int i, j;
 
        /* check the BSS with highest priority first */
        for (j = adapter->priv_num - 1; j >= 0; --j) {
-               spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
-                                 flags_bss);
-
                /* iterate over BSS with the equal priority */
                list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
                                    &adapter->bss_prio_tbl[j].bss_prio_head,
@@ -985,19 +984,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
                        }
                }
 
-               spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
-                                      flags_bss);
        }
 
        return NULL;
 
 found:
-       /* holds bss_prio_lock / ra_list_spinlock */
+       /* holds ra_list_spinlock */
        if (atomic_read(hqp) > i)
                atomic_set(hqp, i);
        spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
-       spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
-                              flags_bss);
 
        *priv = priv_tmp;
        *tid = tos_to_tid[i];
@@ -1179,6 +1174,14 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
 
        skb = skb_dequeue(&ptr->skb_head);
 
+       if (adapter->data_sent || adapter->tx_lock_flag) {
+               spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+                                      ra_list_flags);
+               skb_queue_tail(&adapter->tx_data_q, skb);
+               atomic_inc(&adapter->tx_queued);
+               return;
+       }
+
        if (!skb_queue_empty(&ptr->skb_head))
                skb_next = skb_peek(&ptr->skb_head);
        else
@@ -1276,13 +1279,13 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
        }
 
        if (!ptr->is_11n_enabled ||
-           mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
-           priv->wps.session_enable) {
+               ptr->ba_status ||
+               priv->wps.session_enable) {
                if (ptr->is_11n_enabled &&
-                   mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
-                   mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
-                   mwifiex_is_amsdu_allowed(priv, tid) &&
-                   mwifiex_is_11n_aggragation_possible(priv, ptr,
+                       ptr->ba_status &&
+                       ptr->amsdu_in_ampdu &&
+                       mwifiex_is_amsdu_allowed(priv, tid) &&
+                       mwifiex_is_11n_aggragation_possible(priv, ptr,
                                                        adapter->tx_buf_size))
                        mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
                        /* ra_list_spinlock has been freed in
@@ -1329,11 +1332,16 @@ void
 mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
 {
        do {
-               /* Check if busy */
-               if (adapter->data_sent || adapter->tx_lock_flag)
-                       break;
-
                if (mwifiex_dequeue_tx_packet(adapter))
                        break;
+               if (adapter->iface_type != MWIFIEX_SDIO) {
+                       if (adapter->data_sent ||
+                           adapter->tx_lock_flag)
+                               break;
+               } else {
+                       if (atomic_read(&adapter->tx_queued) >=
+                           MWIFIEX_MAX_PKTS_TXQ)
+                               break;
+               }
        } while (!mwifiex_wmm_lists_empty(adapter));
 }
index 569bd73f33c5f001f93241fe1c81b40e172230e0..48ece0b355919d3c3a4278dfc1727fc391f5848d 100644 (file)
@@ -127,4 +127,6 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
                            const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
 
+struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
+                                       *priv, u8 tid, const u8 *ra_addr);
 #endif /* !_MWIFIEX_WMM_H_ */
index 8444313eabe2bbc1bdcb660c5a82bff17f3fdb90..6ec2466b52b6ccd686341e2ed53df35f0b0370e0 100644 (file)
@@ -233,6 +233,7 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
 {
        __le32 *reg;
        u32 fw_mode;
+       int ret;
 
        reg = kmalloc(sizeof(*reg), GFP_KERNEL);
        if (reg == NULL)
@@ -242,11 +243,14 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
         * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
         * returned value would be invalid.
         */
-       rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
-                                USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
-                                reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+       ret = rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
+                                      USB_VENDOR_REQUEST_IN, 0,
+                                      USB_MODE_AUTORUN, reg, sizeof(*reg),
+                                      REGISTER_TIMEOUT_FIRMWARE);
        fw_mode = le32_to_cpu(*reg);
        kfree(reg);
+       if (ret < 0)
+               return ret;
 
        if ((fw_mode & 0x00000003) == 2)
                return 1;
@@ -289,6 +293,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
        if (retval) {
                rt2x00_info(rt2x00dev,
                            "Firmware loading not required - NIC in AutoRun mode\n");
+               __clear_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
        } else {
                rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
                                              data + offset, length);
@@ -374,7 +379,6 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
 static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
        rt2800_disable_radio(rt2x00dev);
-       rt2x00usb_disable_radio(rt2x00dev);
 }
 
 static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1040,6 +1044,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x07d1, 0x3c17) },
        { USB_DEVICE(0x2001, 0x3317) },
        { USB_DEVICE(0x2001, 0x3c1b) },
+       { USB_DEVICE(0x2001, 0x3c25) },
        /* Draytek */
        { USB_DEVICE(0x07fa, 0x7712) },
        /* DVICO */
index 8f85fbd5f237eff576e343009d1aeefe26a39f02..569363da00a2999fca8cac2d425347d759a62b08 100644 (file)
@@ -199,7 +199,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
                                           const unsigned int offset,
                                           u32 *value)
 {
-       __le32 reg;
+       __le32 reg = 0;
        rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
                                      USB_VENDOR_REQUEST_IN, offset,
                                      &reg, sizeof(reg));
@@ -219,7 +219,7 @@ static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
                                                const unsigned int offset,
                                                u32 *value)
 {
-       __le32 reg;
+       __le32 reg = 0;
        rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
                                       USB_VENDOR_REQUEST_IN, offset,
                                       &reg, sizeof(reg), REGISTER_TIMEOUT);
index 074f716020aae4e28d3e8340da846514065a94db..01f56c7df8b501f7b6390a1f2a12bd1a70168a26 100644 (file)
@@ -1315,7 +1315,8 @@ static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
 }
 
 /*should call before software enc*/
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
+                      bool is_enc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -1344,7 +1345,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                break;
        }
 
-       offset = mac_hdr_len + SNAP_SIZE + encrypt_header_len;
+       offset = mac_hdr_len + SNAP_SIZE;
+       if (is_enc)
+               offset += encrypt_header_len;
        ether_type = be16_to_cpup((__be16 *)(skb->data + offset));
 
        if (ETH_P_IP == ether_type) {
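
rtl_is_special_data() now takes an is_enc flag because the EtherType sits at a different offset depending on whether an encryption header (IV) precedes the LLC/SNAP payload: TX-side rate-control callers pass false before software encryption, RX-side callers pass true. A small sketch of the offset computation with assumed header sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SNAP_SIZE 6	/* LLC/SNAP bytes preceding the EtherType */

/* Where the EtherType lives, relative to the start of the 802.11 frame. */
static size_t ethertype_offset(size_t mac_hdr_len, size_t iv_len, bool is_enc)
{
	size_t off = mac_hdr_len + SNAP_SIZE;

	if (is_enc)	/* an IV/encryption header sits before the SNAP payload */
		off += iv_len;
	return off;
}

int main(void)
{
	/* Assumed sizes: 24-byte data header, 8-byte CCMP IV. */
	printf("plain tx: %zu, encrypted rx: %zu\n",
	       ethertype_offset(24, 8, false), ethertype_offset(24, 8, true));
	return 0;
}
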
index dee4ac2f27e2c372afc6b7111357e4cfbed207ae..74233d601a909b12b6eb33dcd1793db28efca3da 100644 (file)
@@ -120,10 +120,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
                         bool isvht, u8 desc_rate);
 bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
+                      bool is_enc);
 
 void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
 int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
index a62170ea04818e37790eeec99fc2047e255b030f..f46c9d7f652813a46fe6d8bbe24a48ab814afd6f 100644 (file)
@@ -887,7 +887,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                unicast = true;
                                rtlpriv->stats.rxbytesunicast += skb->len;
                        }
-                       rtl_is_special_data(hw, skb, false);
+                       rtl_is_special_data(hw, skb, false, true);
 
                        if (ieee80211_is_data(fc)) {
                                rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        /*This is for new trx flow*/
        struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
        u8 temp_one = 1;
+       u8 *entry;
 
        memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
        ring = &rtlpci->tx_ring[BEACON_QUEUE];
        pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
+       if (rtlpriv->use_new_trx_flow)
+               entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+       else
+               entry = (u8 *)(&ring->desc[ring->idx]);
+       if (pskb) {
+               pci_unmap_single(rtlpci->pdev,
+                                rtlpriv->cfg->ops->get_desc(
+                                (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+                                pskb->len, PCI_DMA_TODEVICE);
                kfree_skb(pskb);
+       }
 
        /*NB: the beacon data buffer must be 32-bit aligned. */
        pskb = ieee80211_beacon_get(hw, mac->vif);
index 7863bd278b227f6c7dd5057405d64a042f2fe69a..74c14ce28238eed70f8ad798f0724642147411d3 100644 (file)
@@ -56,7 +56,8 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
                wireless_mode = sta_entry->wireless_mode;
        }
 
-       if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) {
+       if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true, false) ||
+           not_data) {
                return 0;
        } else {
                if (rtlhal->current_bandtype == BAND_ON_2_4G) {
@@ -201,7 +202,7 @@ static void rtl_tx_status(void *ppriv,
        if (!priv_sta || !ieee80211_is_data(fc))
                return;
 
-       if (rtl_is_special_data(mac->hw, skb, true))
+       if (rtl_is_special_data(mac->hw, skb, true, true))
                return;
 
        if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
index edc2cbb6253c9263bed3fd294301cd79ea69f32d..86ce5b1930e6d2824b66f7d5c95210c59fca6cb7 100644 (file)
@@ -30,6 +30,7 @@
 #include "../cam.h"
 #include "../ps.h"
 #include "../pci.h"
+#include "../pwrseqcmd.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -885,7 +886,7 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
 
        rtl_write_word(rtlpriv, REG_CR, 0x2ff);
        rtl_write_byte(rtlpriv, REG_CR+1, 0x06);
-       rtl_write_byte(rtlpriv, REG_CR+2, 0x00);
+       rtl_write_byte(rtlpriv, MSR, 0x00);
 
        if (!rtlhal->mac_func_enable) {
                if (_rtl88ee_llt_table_init(hw) == false) {
@@ -1277,7 +1278,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index 0c20dd74d6ecd862043321f4e5e1b9d0e7ff3693..d310d55d800efd584f9dd9a6d8e143539c0c777c 100644 (file)
@@ -1364,7 +1364,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
                         "Network type %d not supported!\n", type);
                goto error_out;
        }
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
@@ -1471,8 +1471,7 @@ static void _InitBeaconParameters(struct ieee80211_hw *hw)
                rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
 }
 
-static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
-                                   bool Linked)
+static void _beacon_function_enable(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -1517,7 +1516,7 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
                rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
        }
-       _beacon_function_enable(hw, true, true);
+       _beacon_function_enable(hw);
 }
 
 void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
index 133e395b7401fc80f286792441c72fbb7f56a397..adb810794eef71e27080cd9590a07911eba5123b 100644 (file)
@@ -497,7 +497,7 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
                         "Network type %d not supported!\n", type);
                return -EOPNOTSUPP;
        }
-       rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+       rtl_write_byte(rtlpriv, MSR, value);
        return 0;
 }
 
index 90a714c189a8e5694fa0c84ec8868739b436135f..23806c243a53174db28aa1b0a99e5a01119206d7 100644 (file)
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
        {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
        {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
        {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+       {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
        {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
        {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
        {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
index 01bcc2d218dc5d215a1f0e6125699a007d94b574..f49b60d314502d7565eea0badb7666b8e5e76a55 100644 (file)
@@ -1126,7 +1126,7 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
                break;
 
        }
-       rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index db230a3f0137e0650f3e64d3ae87dfd20d6c5e87..da0a6125f314b7c582eefb5ea297fd2efd593ca0 100644 (file)
@@ -1510,7 +1510,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index dee88a80bee136946523a37076b4bdbaee14cf44..12b0978ba4faf26161e57f34d61644149544fa9d 100644 (file)
@@ -1204,7 +1204,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
        if (type != NL80211_IFTYPE_AP &&
            rtlpriv->mac80211.link_state < MAC80211_LINKED)
                bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
 
        temp = rtl_read_dword(rtlpriv, TCR);
        rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8)));
index b3b094759f6dc1d76ea3db83392d64599262666e..67bb47d77b68c5d45bd6bb6dc0f0721c2461fa2c 100644 (file)
@@ -1183,7 +1183,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index b46998341c409ea00cdb635f5132954c305b2a49..b681af3c7a355d66fb411c71dcdcbba5bb8dfc01 100644 (file)
@@ -1558,7 +1558,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index 2a0a71bac00c84d331972fd27e234fafe70ec5f3..8704eee9f3a495108e93135d6e306a090c322bc7 100644 (file)
@@ -423,7 +423,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
                break;
        case HW_VAR_MEDIA_STATUS:
-               val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3;
+               val[0] = rtl_read_byte(rtlpriv, MSR) & 0x3;
                break;
        case HW_VAR_SLOT_TIME:
                *((u8 *)(val)) = mac->slot_time;
@@ -2178,7 +2178,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
                return 1;
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if ((bt_msr & 0xfc) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index 72af4b9ee32b3f24f350acf03b188710d02f254f..174743aef9431c16673ff69f4d1ff79e47ecf752 100644 (file)
@@ -64,6 +64,20 @@ static u16 odm_cfo(char value)
        return ret_val;
 }
 
+static u8 _rtl8821ae_evm_dbm_jaguar(char value)
+{
+       char ret_val = value;
+
+       /* -33dB~0dB to 33dB ~ 0dB*/
+       if (ret_val == -128)
+               ret_val = 127;
+       else if (ret_val < 0)
+               ret_val = 0 - ret_val;
+
+       ret_val  = ret_val >> 1;
+       return ret_val;
+}
+
 static void query_rxphystatus(struct ieee80211_hw *hw,
                              struct rtl_stats *pstatus, u8 *pdesc,
                              struct rx_fwinfo_8821ae *p_drvinfo,
@@ -246,7 +260,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
 
                for (i = 0; i < max_spatial_stream; i++) {
                        evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]);
-                       evmdbm = rtl_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
+                       evmdbm = _rtl8821ae_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
 
                        if (bpacket_match_bssid) {
                                /* Fill value in RFD, Get the first
index 2d0736a09fc0e0a590724ff4415134018b3f8293..d8b30690b00de35aad2595a4e8d28952b9b2ffb4 100644 (file)
@@ -39,15 +39,8 @@ EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
 
 u8 rtl_evm_db_to_percentage(char value)
 {
-       char ret_val;
-       ret_val = value;
+       char ret_val = clamp(-value, 0, 33) * 3;
 
-       if (ret_val >= 0)
-               ret_val = 0;
-       if (ret_val <= -33)
-               ret_val = -33;
-       ret_val = 0 - ret_val;
-       ret_val *= 3;
        if (ret_val == 99)
                ret_val = 100;
 
@@ -55,21 +48,6 @@ u8 rtl_evm_db_to_percentage(char value)
 }
 EXPORT_SYMBOL(rtl_evm_db_to_percentage);
 
-u8 rtl_evm_dbm_jaguar(char value)
-{
-       char ret_val = value;
-
-       /* -33dB~0dB to 33dB ~ 0dB*/
-       if (ret_val == -128)
-               ret_val = 127;
-       else if (ret_val < 0)
-               ret_val = 0 - ret_val;
-
-       ret_val  = ret_val >> 1;
-       return ret_val;
-}
-EXPORT_SYMBOL(rtl_evm_dbm_jaguar);
-
 static long rtl_translate_todbm(struct ieee80211_hw *hw,
                         u8 signal_strength_index)
 {
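
The hunk above collapses rtl_evm_db_to_percentage()'s clamp-and-negate sequence into a single clamp(-value, 0, 33) * 3 expression. A quick userspace check that the old and new formulations return the same result for every possible input byte, using a local stand-in for the kernel's clamp() macro and signed char in place of the kernel's plain char (signed on the architectures this driver targets):

#include <assert.h>
#include <stdio.h>

/* Stand-in for the kernel's clamp(); adequate for plain ints. */
#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Formulation removed by the patch. */
static unsigned char evm_db_to_percentage_old(signed char value)
{
        signed char ret_val = value;

        if (ret_val >= 0)
                ret_val = 0;
        if (ret_val <= -33)
                ret_val = -33;
        ret_val = 0 - ret_val;
        ret_val *= 3;
        if (ret_val == 99)
                ret_val = 100;
        return ret_val;
}

/* Formulation added by the patch. */
static unsigned char evm_db_to_percentage_new(signed char value)
{
        signed char ret_val = clamp(-value, 0, 33) * 3;

        if (ret_val == 99)
                ret_val = 100;
        return ret_val;
}

int main(void)
{
        int v;

        for (v = -128; v <= 127; v++)
                assert(evm_db_to_percentage_old((signed char)v) ==
                       evm_db_to_percentage_new((signed char)v));
        printf("old and new rtl_evm_db_to_percentage agree on all inputs\n");
        return 0;
}
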
index aa4eec80ccf7a7588e3ec58f817172130fcc4a1d..2b57dffef572f019ac42d5d2465bb82e82bc29ce 100644 (file)
@@ -35,7 +35,6 @@
 
 u8 rtl_query_rxpwrpercentage(char antpower);
 u8 rtl_evm_db_to_percentage(char value);
-u8 rtl_evm_dbm_jaguar(char value);
 long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
 void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
                         struct rtl_stats *pstatus);
index d1e9a13be910b584d5e17394822ba3c1ff2a9ba4..5d54d16a59e7151942246ff1d737af28c332fadc 100644 (file)
@@ -1608,7 +1608,7 @@ int wl1251_free_hw(struct wl1251 *wl)
 }
 EXPORT_SYMBOL_GPL(wl1251_free_hw);
 
-MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
+MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
 MODULE_FIRMWARE(WL1251_FW_NAME);
index c93fae95baac87e1775714c122a32ab35bfbda3c..5fbd2230f372f2a17d763be90efa3228bca5bdbd 100644 (file)
@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
 
-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
                                  AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
index 0f2cfb0d2a9ec38fe013872e6d4339c2db1345e3..bf14676e6515002b8b2982f507386e8999b356e9 100644 (file)
@@ -26,8 +26,8 @@
 
 #include "wlcore.h"
 
-int wl1271_format_buffer(char __user *userbuf, size_t count,
-                        loff_t *ppos, char *fmt, ...);
+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
+                                       loff_t *ppos, char *fmt, ...);
 
 int wl1271_debugfs_init(struct wl1271 *wl);
 void wl1271_debugfs_exit(struct wl1271 *wl);
index e9b960f0ff32c8af2ff404a138780ff751bf4572..720aaf6313d296bec9b9a4826f1240b0eb4c0940 100644 (file)
@@ -1008,8 +1008,7 @@ err:
 
 static int xennet_change_mtu(struct net_device *dev, int mtu)
 {
-       int max = xennet_can_sg(dev) ?
-               XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+       int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
 
        if (mtu > max)
                return -EINVAL;
@@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netdev->ethtool_ops = &xennet_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
-       netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
-
        np->netdev = netdev;
 
        netif_carrier_off(netdev);
index 7929fac13e1c55ac072d46f6c2d3a6ea10528fa7..107714e4405fd348331c6f2ecdbfc18c43709d51 100644 (file)
@@ -73,4 +73,5 @@ source "drivers/nfc/microread/Kconfig"
 source "drivers/nfc/nfcmrvl/Kconfig"
 source "drivers/nfc/st21nfca/Kconfig"
 source "drivers/nfc/st21nfcb/Kconfig"
+source "drivers/nfc/nxp-nci/Kconfig"
 endmenu
index 6b23a2c6e34adf7c1ed35e0aa7e40aa2e9a7cc66..a4292d790f9b900130462d7f4bf7b3f64e3dec7b 100644 (file)
@@ -13,5 +13,6 @@ obj-$(CONFIG_NFC_MRVL)                += nfcmrvl/
 obj-$(CONFIG_NFC_TRF7970A)     += trf7970a.o
 obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca/
 obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb/
+obj-$(CONFIG_NFC_NXP_NCI)      += nxp-nci/
 
 ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
index df85cd3d9db0c76e1d80ebbf3bcc204f3e3f8a0c..661e2c8143c40a8e4ab6e98b76a007eafa9a6fd4 100644 (file)
@@ -286,7 +286,7 @@ static int microread_i2c_probe(struct i2c_client *client,
        if (r < 0)
                goto err_irq;
 
-       nfc_info(&client->dev, "Probed");
+       nfc_info(&client->dev, "Probed\n");
 
        return 0;
 
index 85e8bcf986936123bdab959e5b01fb8738d80e79..ad4933cefbd1b2c60ae5ef23aeece275d2fcf0fd 100644 (file)
@@ -111,7 +111,7 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
 
        priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, 0, 0);
        if (!priv->ndev) {
-               nfc_err(dev, "nci_allocate_device failed");
+               nfc_err(dev, "nci_allocate_device failed\n");
                rc = -ENOMEM;
                goto error;
        }
@@ -120,7 +120,7 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
 
        rc = nci_register_device(priv->ndev);
        if (rc) {
-               nfc_err(dev, "nci_register_device failed %d", rc);
+               nfc_err(dev, "nci_register_device failed %d\n", rc);
                nci_free_device(priv->ndev);
                goto error;
        }
index 3221ca37d6c9463700af6ed4f5a25fe7d4825379..6cf15c1a261831dadc5194bdc3615c7362c6ce0d 100644 (file)
@@ -80,7 +80,7 @@ static void nfcmrvl_bulk_complete(struct urb *urb)
        if (!urb->status) {
                if (nfcmrvl_nci_recv_frame(drv_data->priv, urb->transfer_buffer,
                                           urb->actual_length) < 0)
-                       nfc_err(&drv_data->udev->dev, "corrupted Rx packet");
+                       nfc_err(&drv_data->udev->dev, "corrupted Rx packet\n");
        }
 
        if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags))
@@ -96,7 +96,7 @@ static void nfcmrvl_bulk_complete(struct urb *urb)
                 */
                if (err != -EPERM && err != -ENODEV)
                        nfc_err(&drv_data->udev->dev,
-                               "urb %p failed to resubmit (%d)", urb, -err);
+                               "urb %p failed to resubmit (%d)\n", urb, -err);
                usb_unanchor_urb(urb);
        }
 }
@@ -137,7 +137,7 @@ nfcmrvl_submit_bulk_urb(struct nfcmrvl_usb_drv_data *drv_data, gfp_t mem_flags)
        if (err) {
                if (err != -EPERM && err != -ENODEV)
                        nfc_err(&drv_data->udev->dev,
-                               "urb %p submission failed (%d)", urb, -err);
+                               "urb %p submission failed (%d)\n", urb, -err);
                usb_unanchor_urb(urb);
        }
 
@@ -153,7 +153,7 @@ static void nfcmrvl_tx_complete(struct urb *urb)
        struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
        struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
 
-       nfc_info(priv->dev, "urb %p status %d count %d",
+       nfc_info(priv->dev, "urb %p status %d count %d\n",
                 urb, urb->status, urb->actual_length);
 
        spin_lock(&drv_data->txlock);
@@ -253,7 +253,7 @@ static int nfcmrvl_usb_nci_send(struct nfcmrvl_private *priv,
        if (err) {
                if (err != -EPERM && err != -ENODEV)
                        nfc_err(&drv_data->udev->dev,
-                               "urb %p submission failed (%d)", urb, -err);
+                               "urb %p submission failed (%d)\n", urb, -err);
                kfree(urb->setup_packet);
                usb_unanchor_urb(urb);
        } else {
@@ -293,7 +293,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
        int i;
        struct usb_device *udev = interface_to_usbdev(intf);
 
-       nfc_info(&udev->dev, "intf %p id %p", intf, id);
+       nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
 
        drv_data = devm_kzalloc(&intf->dev, sizeof(*drv_data), GFP_KERNEL);
        if (!drv_data)
@@ -348,7 +348,7 @@ static void nfcmrvl_disconnect(struct usb_interface *intf)
        if (!drv_data)
                return;
 
-       nfc_info(&drv_data->udev->dev, "intf %p", intf);
+       nfc_info(&drv_data->udev->dev, "intf %p\n", intf);
 
        nfcmrvl_nci_unregister_dev(drv_data->priv);
 
@@ -360,7 +360,7 @@ static int nfcmrvl_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
 
-       nfc_info(&drv_data->udev->dev, "intf %p", intf);
+       nfc_info(&drv_data->udev->dev, "intf %p\n", intf);
 
        if (drv_data->suspend_count++)
                return 0;
@@ -401,7 +401,7 @@ static int nfcmrvl_resume(struct usb_interface *intf)
        struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
        int err = 0;
 
-       nfc_info(&drv_data->udev->dev, "intf %p", intf);
+       nfc_info(&drv_data->udev->dev, "intf %p\n", intf);
 
        if (--drv_data->suspend_count)
                return 0;
diff --git a/drivers/nfc/nxp-nci/Kconfig b/drivers/nfc/nxp-nci/Kconfig
new file mode 100644 (file)
index 0000000..37b4061
--- /dev/null
@@ -0,0 +1,25 @@
+config NFC_NXP_NCI
+       tristate "NXP-NCI NFC driver"
+       depends on NFC_NCI
+       default n
+       ---help---
+         Generic core driver for NXP NCI chips such as the NPC100
+         or PN7150 families.
+         This is a driver based on the NCI NFC kernel layers and
+         will thus not work with NXP libnfc library.
+
+         To compile this driver as a module, choose m here. The module will
+         be called nxp_nci.
+         Say N if unsure.
+
+config NFC_NXP_NCI_I2C
+       tristate "NXP-NCI I2C support"
+       depends on NFC_NXP_NCI && I2C
+       ---help---
+         This module adds support for an I2C interface to the NXP NCI
+         chips.
+         Select this if your platform is using the I2C bus.
+
+         To compile this driver as a module, choose m here. The module will
+         be called nxp_nci_i2c.
+         Say Y if unsure.
diff --git a/drivers/nfc/nxp-nci/Makefile b/drivers/nfc/nxp-nci/Makefile
new file mode 100644 (file)
index 0000000..c008be3
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Makefile for NXP-NCI NFC driver
+#
+
+nxp-nci-objs = core.o firmware.o
+nxp-nci_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci.o
+obj-$(CONFIG_NFC_NXP_NCI_I2C) += nxp-nci_i2c.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
new file mode 100644 (file)
index 0000000..8979636
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Generic driver for NXP NCI NFC chips
+ *
+ * Copyright (C) 2014  NXP Semiconductors  All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/nxp-nci.h>
+
+#include <net/nfc/nci_core.h>
+
+#include "nxp-nci.h"
+
+#define NXP_NCI_HDR_LEN        4
+
+#define NXP_NCI_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+                              NFC_PROTO_MIFARE_MASK | \
+                              NFC_PROTO_FELICA_MASK | \
+                              NFC_PROTO_ISO14443_MASK | \
+                              NFC_PROTO_ISO14443_B_MASK | \
+                              NFC_PROTO_NFC_DEP_MASK)
+
+static int nxp_nci_open(struct nci_dev *ndev)
+{
+       struct nxp_nci_info *info = nci_get_drvdata(ndev);
+       int r = 0;
+
+       mutex_lock(&info->info_lock);
+
+       if (info->mode != NXP_NCI_MODE_COLD) {
+               r = -EBUSY;
+               goto open_exit;
+       }
+
+       if (info->phy_ops->set_mode)
+               r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_NCI);
+
+       info->mode = NXP_NCI_MODE_NCI;
+
+open_exit:
+       mutex_unlock(&info->info_lock);
+       return r;
+}
+
+static int nxp_nci_close(struct nci_dev *ndev)
+{
+       struct nxp_nci_info *info = nci_get_drvdata(ndev);
+       int r = 0;
+
+       mutex_lock(&info->info_lock);
+
+       if (info->phy_ops->set_mode)
+               r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+
+       info->mode = NXP_NCI_MODE_COLD;
+
+       mutex_unlock(&info->info_lock);
+       return r;
+}
+
+static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+       struct nxp_nci_info *info = nci_get_drvdata(ndev);
+       int r;
+
+       if (!info->phy_ops->write) {
+               r = -ENOTSUPP;
+               goto send_exit;
+       }
+
+       if (info->mode != NXP_NCI_MODE_NCI) {
+               r = -EINVAL;
+               goto send_exit;
+       }
+
+       r = info->phy_ops->write(info->phy_id, skb);
+       if (r < 0)
+               kfree_skb(skb);
+
+send_exit:
+       return r;
+}
+
+static struct nci_ops nxp_nci_ops = {
+       .open = nxp_nci_open,
+       .close = nxp_nci_close,
+       .send = nxp_nci_send,
+       .fw_download = nxp_nci_fw_download,
+};
+
+int nxp_nci_probe(void *phy_id, struct device *pdev,
+                 struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload,
+                 struct nci_dev **ndev)
+{
+       struct nxp_nci_info *info;
+       int r;
+
+       info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL);
+       if (!info) {
+               r = -ENOMEM;
+               goto probe_exit;
+       }
+
+       info->phy_id = phy_id;
+       info->pdev = pdev;
+       info->phy_ops = phy_ops;
+       info->max_payload = max_payload;
+       INIT_WORK(&info->fw_info.work, nxp_nci_fw_work);
+       init_completion(&info->fw_info.cmd_completion);
+       mutex_init(&info->info_lock);
+
+       if (info->phy_ops->set_mode) {
+               r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+               if (r < 0)
+                       goto probe_exit;
+       }
+
+       info->mode = NXP_NCI_MODE_COLD;
+
+       info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS,
+                                        NXP_NCI_HDR_LEN, 0);
+       if (!info->ndev) {
+               r = -ENOMEM;
+               goto probe_exit;
+       }
+
+       nci_set_parent_dev(info->ndev, pdev);
+       nci_set_drvdata(info->ndev, info);
+       r = nci_register_device(info->ndev);
+       if (r < 0)
+               goto probe_exit_free_nci;
+
+       *ndev = info->ndev;
+
+       goto probe_exit;
+
+probe_exit_free_nci:
+       nci_free_device(info->ndev);
+probe_exit:
+       return r;
+}
+EXPORT_SYMBOL(nxp_nci_probe);
+
+void nxp_nci_remove(struct nci_dev *ndev)
+{
+       struct nxp_nci_info *info = nci_get_drvdata(ndev);
+
+       if (info->mode == NXP_NCI_MODE_FW)
+               nxp_nci_fw_work_complete(info, -ESHUTDOWN);
+       cancel_work_sync(&info->fw_info.work);
+
+       mutex_lock(&info->info_lock);
+
+       if (info->phy_ops->set_mode)
+               info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+
+       nci_unregister_device(ndev);
+       nci_free_device(ndev);
+
+       mutex_unlock(&info->info_lock);
+}
+EXPORT_SYMBOL(nxp_nci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NXP NCI NFC driver");
+MODULE_AUTHOR("Clément Perrochaud <clement.perrochaud@nxp.com>");
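
core.c keeps the NCI logic bus-agnostic: all hardware access goes through the nxp_nci_phy_ops table handed to nxp_nci_probe(), with set_mode() treated as optional and write() required by the send path. A stripped-down userspace mock of that indirection — the struct names and the trivial "bus" backend below are invented for illustration; the real driver moves struct sk_buff through struct nci_dev and returns -ENOTSUPP rather than the userspace ENOTSUP used here:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

enum mode { MODE_COLD, MODE_NCI, MODE_FW };

/* Invented stand-in for the driver's phy_ops contract. */
struct phy_ops {
        int (*set_mode)(void *id, enum mode mode);              /* optional */
        int (*write)(void *id, const void *buf, size_t len);    /* required */
};

struct core {
        void *phy_id;
        const struct phy_ops *ops;
        enum mode mode;
};

/* Mirrors nxp_nci_send(): refuse without a write op or outside NCI mode. */
static int core_send(struct core *c, const void *buf, size_t len)
{
        if (!c->ops->write)
                return -ENOTSUP;
        if (c->mode != MODE_NCI)
                return -EINVAL;
        return c->ops->write(c->phy_id, buf, len);
}

/* Trivial backend standing in for the I2C phy. */
static int mock_write(void *id, const void *buf, size_t len)
{
        (void)id; (void)buf;
        printf("bus write of %zu bytes\n", len);
        return 0;
}

static const struct phy_ops mock_ops = { .write = mock_write };

int main(void)
{
        struct core c = { .ops = &mock_ops, .mode = MODE_COLD };
        unsigned char cmd[4] = { 0x20, 0x00, 0x01, 0x00 };      /* arbitrary */

        printf("send in COLD mode -> %d\n", core_send(&c, cmd, sizeof(cmd)));
        c.mode = MODE_NCI;              /* what nxp_nci_open() arranges */
        printf("send in NCI mode  -> %d\n", core_send(&c, cmd, sizeof(cmd)));
        return 0;
}
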
diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c
new file mode 100644 (file)
index 0000000..5291797
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+ * Generic driver for NXP NCI NFC chips
+ *
+ * Copyright (C) 2014  NXP Semiconductors  All rights reserved.
+ *
+ * Author: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/nfc.h>
+#include <linux/unaligned/access_ok.h>
+
+#include "nxp-nci.h"
+
+/* Crypto operations can take up to 30 seconds */
+#define NXP_NCI_FW_ANSWER_TIMEOUT      msecs_to_jiffies(30000)
+
+#define NXP_NCI_FW_CMD_RESET           0xF0
+#define NXP_NCI_FW_CMD_GETVERSION      0xF1
+#define NXP_NCI_FW_CMD_CHECKINTEGRITY  0xE0
+#define NXP_NCI_FW_CMD_WRITE           0xC0
+#define NXP_NCI_FW_CMD_READ            0xA2
+#define NXP_NCI_FW_CMD_GETSESSIONSTATE 0xF2
+#define NXP_NCI_FW_CMD_LOG             0xA7
+#define NXP_NCI_FW_CMD_FORCE           0xD0
+#define NXP_NCI_FW_CMD_GET_DIE_ID      0xF4
+
+#define NXP_NCI_FW_CHUNK_FLAG  0x0400
+
+#define NXP_NCI_FW_RESULT_OK                           0x00
+#define NXP_NCI_FW_RESULT_INVALID_ADDR                 0x01
+#define NXP_NCI_FW_RESULT_GENERIC_ERROR                        0x02
+#define NXP_NCI_FW_RESULT_UNKNOWN_CMD                  0x0B
+#define NXP_NCI_FW_RESULT_ABORTED_CMD                  0x0C
+#define NXP_NCI_FW_RESULT_PLL_ERROR                    0x0D
+#define NXP_NCI_FW_RESULT_ADDR_RANGE_OFL_ERROR         0x1E
+#define NXP_NCI_FW_RESULT_BUFFER_OFL_ERROR             0x1F
+#define NXP_NCI_FW_RESULT_MEM_BSY                      0x20
+#define NXP_NCI_FW_RESULT_SIGNATURE_ERROR              0x21
+#define NXP_NCI_FW_RESULT_FIRMWARE_VERSION_ERROR       0x24
+#define NXP_NCI_FW_RESULT_PROTOCOL_ERROR               0x28
+#define NXP_NCI_FW_RESULT_SFWU_DEGRADED                        0x2A
+#define NXP_NCI_FW_RESULT_PH_STATUS_FIRST_CHUNK                0x2D
+#define NXP_NCI_FW_RESULT_PH_STATUS_NEXT_CHUNK         0x2E
+#define NXP_NCI_FW_RESULT_PH_STATUS_INTERNAL_ERROR_5   0xC5
+
+void nxp_nci_fw_work_complete(struct nxp_nci_info *info, int result)
+{
+       struct nxp_nci_fw_info *fw_info = &info->fw_info;
+       int r;
+
+       if (info->phy_ops->set_mode) {
+               r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+               if (r < 0 && result == 0)
+                       result = -r;
+       }
+
+       info->mode = NXP_NCI_MODE_COLD;
+
+       if (fw_info->fw) {
+               release_firmware(fw_info->fw);
+               fw_info->fw = NULL;
+       }
+
+       nfc_fw_download_done(info->ndev->nfc_dev, fw_info->name, (u32) -result);
+}
+
+/* crc_ccitt cannot be used since it is computed MSB first and not LSB first */
+static u16 nxp_nci_fw_crc(u8 const *buffer, size_t len)
+{
+       u16 crc = 0xffff;
+
+       while (len--) {
+               crc = ((crc >> 8) | (crc << 8)) ^ *buffer++;
+               crc ^= (crc & 0xff) >> 4;
+               crc ^= (crc & 0xff) << 12;
+               crc ^= (crc & 0xff) << 5;
+       }
+
+       return crc;
+}
+
+static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info)
+{
+       struct nxp_nci_fw_info *fw_info = &info->fw_info;
+       u16 header, crc;
+       struct sk_buff *skb;
+       size_t chunk_len;
+       size_t remaining_len;
+       int r;
+
+       skb = nci_skb_alloc(info->ndev, info->max_payload, GFP_KERNEL);
+       if (!skb) {
+               r = -ENOMEM;
+               goto chunk_exit;
+       }
+
+       chunk_len = info->max_payload - NXP_NCI_FW_HDR_LEN - NXP_NCI_FW_CRC_LEN;
+       remaining_len = fw_info->frame_size - fw_info->written;
+
+       if (remaining_len > chunk_len) {
+               header = NXP_NCI_FW_CHUNK_FLAG;
+       } else {
+               chunk_len = remaining_len;
+               header = 0x0000;
+       }
+
+       header |= chunk_len & NXP_NCI_FW_FRAME_LEN_MASK;
+       put_unaligned_be16(header, skb_put(skb, NXP_NCI_FW_HDR_LEN));
+
+       memcpy(skb_put(skb, chunk_len), fw_info->data + fw_info->written,
+              chunk_len);
+
+       crc = nxp_nci_fw_crc(skb->data, chunk_len + NXP_NCI_FW_HDR_LEN);
+       put_unaligned_be16(crc, skb_put(skb, NXP_NCI_FW_CRC_LEN));
+
+       r = info->phy_ops->write(info->phy_id, skb);
+       if (r >= 0)
+               r = chunk_len;
+
+       kfree_skb(skb);
+
+chunk_exit:
+       return r;
+}
+
+static int nxp_nci_fw_send(struct nxp_nci_info *info)
+{
+       struct nxp_nci_fw_info *fw_info = &info->fw_info;
+       long completion_rc;
+       int r;
+
+       reinit_completion(&fw_info->cmd_completion);
+
+       if (fw_info->written == 0) {
+               fw_info->frame_size = get_unaligned_be16(fw_info->data) &
+                                     NXP_NCI_FW_FRAME_LEN_MASK;
+               fw_info->data += NXP_NCI_FW_HDR_LEN;
+               fw_info->size -= NXP_NCI_FW_HDR_LEN;
+       }
+
+       if (fw_info->frame_size > fw_info->size)
+               return -EMSGSIZE;
+
+       r = nxp_nci_fw_send_chunk(info);
+       if (r < 0)
+               return r;
+
+       fw_info->written += r;
+
+       if (*fw_info->data == NXP_NCI_FW_CMD_RESET) {
+               fw_info->cmd_result = 0;
+               if (fw_info->fw)
+                       schedule_work(&fw_info->work);
+       } else {
+               completion_rc = wait_for_completion_interruptible_timeout(
+                       &fw_info->cmd_completion, NXP_NCI_FW_ANSWER_TIMEOUT);
+               if (completion_rc == 0)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+void nxp_nci_fw_work(struct work_struct *work)
+{
+       struct nxp_nci_info *info;
+       struct nxp_nci_fw_info *fw_info;
+       int r;
+
+       fw_info = container_of(work, struct nxp_nci_fw_info, work);
+       info = container_of(fw_info, struct nxp_nci_info, fw_info);
+
+       mutex_lock(&info->info_lock);
+
+       r = fw_info->cmd_result;
+       if (r < 0)
+               goto exit_work;
+
+       if (fw_info->written == fw_info->frame_size) {
+               fw_info->data += fw_info->frame_size;
+               fw_info->size -= fw_info->frame_size;
+               fw_info->written = 0;
+       }
+
+       if (fw_info->size > 0)
+               r = nxp_nci_fw_send(info);
+
+exit_work:
+       if (r < 0 || fw_info->size == 0)
+               nxp_nci_fw_work_complete(info, r);
+       mutex_unlock(&info->info_lock);
+}
+
+int nxp_nci_fw_download(struct nci_dev *ndev, const char *firmware_name)
+{
+       struct nxp_nci_info *info = nci_get_drvdata(ndev);
+       struct nxp_nci_fw_info *fw_info = &info->fw_info;
+       int r;
+
+       mutex_lock(&info->info_lock);
+
+       if (!info->phy_ops->set_mode || !info->phy_ops->write) {
+               r = -ENOTSUPP;
+               goto fw_download_exit;
+       }
+
+       if (!firmware_name || firmware_name[0] == '\0') {
+               r = -EINVAL;
+               goto fw_download_exit;
+       }
+
+       strcpy(fw_info->name, firmware_name);
+
+       r = request_firmware(&fw_info->fw, firmware_name,
+                            ndev->nfc_dev->dev.parent);
+       if (r < 0)
+               goto fw_download_exit;
+
+       r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_FW);
+       if (r < 0) {
+               release_firmware(fw_info->fw);
+               goto fw_download_exit;
+       }
+
+       info->mode = NXP_NCI_MODE_FW;
+
+       fw_info->data = fw_info->fw->data;
+       fw_info->size = fw_info->fw->size;
+       fw_info->written = 0;
+       fw_info->frame_size = 0;
+       fw_info->cmd_result = 0;
+
+       schedule_work(&fw_info->work);
+
+fw_download_exit:
+       mutex_unlock(&info->info_lock);
+       return r;
+}
+
+static int nxp_nci_fw_read_status(u8 stat)
+{
+       switch (stat) {
+       case NXP_NCI_FW_RESULT_OK:
+               return 0;
+       case NXP_NCI_FW_RESULT_INVALID_ADDR:
+               return -EINVAL;
+       case NXP_NCI_FW_RESULT_UNKNOWN_CMD:
+               return -EINVAL;
+       case NXP_NCI_FW_RESULT_ABORTED_CMD:
+               return -EMSGSIZE;
+       case NXP_NCI_FW_RESULT_ADDR_RANGE_OFL_ERROR:
+               return -EADDRNOTAVAIL;
+       case NXP_NCI_FW_RESULT_BUFFER_OFL_ERROR:
+               return -ENOBUFS;
+       case NXP_NCI_FW_RESULT_MEM_BSY:
+               return -ENOKEY;
+       case NXP_NCI_FW_RESULT_SIGNATURE_ERROR:
+               return -EKEYREJECTED;
+       case NXP_NCI_FW_RESULT_FIRMWARE_VERSION_ERROR:
+               return -EALREADY;
+       case NXP_NCI_FW_RESULT_PROTOCOL_ERROR:
+               return -EPROTO;
+       case NXP_NCI_FW_RESULT_SFWU_DEGRADED:
+               return -EHWPOISON;
+       case NXP_NCI_FW_RESULT_PH_STATUS_FIRST_CHUNK:
+               return 0;
+       case NXP_NCI_FW_RESULT_PH_STATUS_NEXT_CHUNK:
+               return 0;
+       case NXP_NCI_FW_RESULT_PH_STATUS_INTERNAL_ERROR_5:
+               return -EINVAL;
+       default:
+               return -EIO;
+       }
+}
+
+static u16 nxp_nci_fw_check_crc(struct sk_buff *skb)
+{
+       u16 crc, frame_crc;
+       size_t len = skb->len - NXP_NCI_FW_CRC_LEN;
+
+       crc = nxp_nci_fw_crc(skb->data, len);
+       frame_crc = get_unaligned_be16(skb->data + len);
+
+       return (crc ^ frame_crc);
+}
+
+void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+       struct nxp_nci_info *info = nci_get_drvdata(ndev);
+       struct nxp_nci_fw_info *fw_info = &info->fw_info;
+
+       complete(&fw_info->cmd_completion);
+
+       if (skb) {
+               if (nxp_nci_fw_check_crc(skb) != 0x00)
+                       fw_info->cmd_result = -EBADMSG;
+               else
+                       fw_info->cmd_result = nxp_nci_fw_read_status(
+                                       *skb_pull(skb, NXP_NCI_FW_HDR_LEN));
+               kfree_skb(skb);
+       } else {
+               fw_info->cmd_result = -EIO;
+       }
+
+       if (fw_info->fw)
+               schedule_work(&fw_info->work);
+}
+EXPORT_SYMBOL(nxp_nci_fw_recv_frame);
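
firmware.c frames each download chunk as a 2-byte big-endian header (length in the low 10 bits, 0x0400 marking a continuation chunk), the payload, and a 2-byte CRC computed LSB-first — hence the comment ruling out crc_ccitt(). A standalone sketch that builds one final-chunk frame and validates it the way nxp_nci_fw_check_crc() does; the payload bytes are invented for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_HDR_LEN        2
#define FW_CRC_LEN        2
#define FW_FRAME_LEN_MASK 0x03FF
#define FW_CHUNK_FLAG     0x0400

/* Same algorithm as the driver's nxp_nci_fw_crc(). */
static uint16_t fw_crc(const uint8_t *buf, size_t len)
{
        uint16_t crc = 0xffff;

        while (len--) {
                crc = ((crc >> 8) | (crc << 8)) ^ *buf++;
                crc ^= (crc & 0xff) >> 4;
                crc ^= (crc & 0xff) << 12;
                crc ^= (crc & 0xff) << 5;
        }
        return crc;
}

static void put_be16(uint8_t *p, uint16_t v)
{
        p[0] = v >> 8;
        p[1] = v & 0xff;
}

static uint16_t get_be16(const uint8_t *p)
{
        return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
        const uint8_t payload[] = { 0xf0, 0x00 };       /* invented payload */
        uint8_t frame[FW_HDR_LEN + sizeof(payload) + FW_CRC_LEN];
        uint16_t header, crc, frame_crc;

        /* Final chunk: no FW_CHUNK_FLAG, length in the low 10 bits. */
        header = sizeof(payload) & FW_FRAME_LEN_MASK;
        put_be16(frame, header);
        memcpy(frame + FW_HDR_LEN, payload, sizeof(payload));
        crc = fw_crc(frame, FW_HDR_LEN + sizeof(payload));
        put_be16(frame + FW_HDR_LEN + sizeof(payload), crc);

        /* Receive side, as in nxp_nci_fw_check_crc(): recompute and XOR. */
        frame_crc = get_be16(frame + sizeof(frame) - FW_CRC_LEN);
        printf("crc check %s (0x%04x)\n",
               (fw_crc(frame, sizeof(frame) - FW_CRC_LEN) ^ frame_crc) == 0 ?
               "ok" : "failed", (unsigned)crc);
        return 0;
}
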
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
new file mode 100644 (file)
index 0000000..17bd67d
--- /dev/null
@@ -0,0 +1,415 @@
+/*
+ * I2C link layer for the NXP NCI driver
+ *
+ * Copyright (C) 2014  NXP Semiconductors  All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/platform_data/nxp-nci.h>
+#include <linux/unaligned/access_ok.h>
+
+#include <net/nfc/nfc.h>
+
+#include "nxp-nci.h"
+
+#define NXP_NCI_I2C_DRIVER_NAME        "nxp-nci_i2c"
+
+#define NXP_NCI_I2C_MAX_PAYLOAD        32
+
+struct nxp_nci_i2c_phy {
+       struct i2c_client *i2c_dev;
+       struct nci_dev *ndev;
+
+       unsigned int gpio_en;
+       unsigned int gpio_fw;
+
+       int hard_fault; /*
+                        * < 0 if hardware error occurred (e.g. i2c err)
+                        * and prevents normal operation.
+                        */
+};
+
+static int nxp_nci_i2c_set_mode(void *phy_id,
+                                   enum nxp_nci_mode mode)
+{
+       struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id;
+
+       gpio_set_value(phy->gpio_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
+       gpio_set_value(phy->gpio_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
+       usleep_range(10000, 15000);
+
+       if (mode == NXP_NCI_MODE_COLD)
+               phy->hard_fault = 0;
+
+       return 0;
+}
+
+static int nxp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+       int r;
+       struct nxp_nci_i2c_phy *phy = phy_id;
+       struct i2c_client *client = phy->i2c_dev;
+
+       if (phy->hard_fault != 0)
+               return phy->hard_fault;
+
+       r = i2c_master_send(client, skb->data, skb->len);
+       if (r == -EREMOTEIO) {
+               /* Retry, chip was in standby */
+               usleep_range(110000, 120000);
+               r = i2c_master_send(client, skb->data, skb->len);
+       }
+
+       if (r < 0) {
+               nfc_err(&client->dev, "Error %d on I2C send\n", r);
+       } else if (r != skb->len) {
+               nfc_err(&client->dev,
+                       "Invalid length sent: %u (expected %u)\n",
+                       r, skb->len);
+               r = -EREMOTEIO;
+       } else {
+               /* Success but return 0 and not number of bytes */
+               r = 0;
+       }
+
+       return r;
+}
+
+static struct nxp_nci_phy_ops i2c_phy_ops = {
+       .set_mode = nxp_nci_i2c_set_mode,
+       .write = nxp_nci_i2c_write,
+};
+
+static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy,
+                              struct sk_buff **skb)
+{
+       struct i2c_client *client = phy->i2c_dev;
+       u16 header;
+       size_t frame_len;
+       int r;
+
+       r = i2c_master_recv(client, (u8 *) &header, NXP_NCI_FW_HDR_LEN);
+       if (r < 0) {
+               goto fw_read_exit;
+       } else if (r != NXP_NCI_FW_HDR_LEN) {
+               nfc_err(&client->dev, "Incorrect header length: %u\n", r);
+               r = -EBADMSG;
+               goto fw_read_exit;
+       }
+
+       frame_len = (get_unaligned_be16(&header) & NXP_NCI_FW_FRAME_LEN_MASK) +
+                   NXP_NCI_FW_CRC_LEN;
+
+       *skb = alloc_skb(NXP_NCI_FW_HDR_LEN + frame_len, GFP_KERNEL);
+       if (*skb == NULL) {
+               r = -ENOMEM;
+               goto fw_read_exit;
+       }
+
+       memcpy(skb_put(*skb, NXP_NCI_FW_HDR_LEN), &header, NXP_NCI_FW_HDR_LEN);
+
+       r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
+       if (r != frame_len) {
+               nfc_err(&client->dev,
+                       "Invalid frame length: %u (expected %zu)\n",
+                       r, frame_len);
+               r = -EBADMSG;
+               goto fw_read_exit_free_skb;
+       }
+
+       return 0;
+
+fw_read_exit_free_skb:
+       kfree_skb(*skb);
+fw_read_exit:
+       return r;
+}
+
+static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
+                               struct sk_buff **skb)
+{
+       struct nci_ctrl_hdr header; /* May actually be a data header */
+       struct i2c_client *client = phy->i2c_dev;
+       int r;
+
+       r = i2c_master_recv(client, (u8 *) &header, NCI_CTRL_HDR_SIZE);
+       if (r < 0) {
+               goto nci_read_exit;
+       } else if (r != NCI_CTRL_HDR_SIZE) {
+               nfc_err(&client->dev, "Incorrect header length: %u\n", r);
+               r = -EBADMSG;
+               goto nci_read_exit;
+       }
+
+       *skb = alloc_skb(NCI_CTRL_HDR_SIZE + header.plen, GFP_KERNEL);
+       if (*skb == NULL) {
+               r = -ENOMEM;
+               goto nci_read_exit;
+       }
+
+       memcpy(skb_put(*skb, NCI_CTRL_HDR_SIZE), (void *) &header,
+              NCI_CTRL_HDR_SIZE);
+
+       r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
+       if (r != header.plen) {
+               nfc_err(&client->dev,
+                       "Invalid frame payload length: %u (expected %u)\n",
+                       r, header.plen);
+               r = -EBADMSG;
+               goto nci_read_exit_free_skb;
+       }
+
+       return 0;
+
+nci_read_exit_free_skb:
+       kfree_skb(*skb);
+nci_read_exit:
+       return r;
+}
+
+static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+       struct nxp_nci_i2c_phy *phy = phy_id;
+       struct i2c_client *client;
+       struct nxp_nci_info *info;
+
+       struct sk_buff *skb = NULL;
+       int r = 0;
+
+       if (!phy || !phy->ndev)
+               goto exit_irq_none;
+
+       client = phy->i2c_dev;
+
+       if (!client || irq != client->irq)
+               goto exit_irq_none;
+
+       info = nci_get_drvdata(phy->ndev);
+
+       if (!info)
+               goto exit_irq_none;
+
+       mutex_lock(&info->info_lock);
+
+       if (phy->hard_fault != 0)
+               goto exit_irq_handled;
+
+       switch (info->mode) {
+       case NXP_NCI_MODE_NCI:
+               r = nxp_nci_i2c_nci_read(phy, &skb);
+               break;
+       case NXP_NCI_MODE_FW:
+               r = nxp_nci_i2c_fw_read(phy, &skb);
+               break;
+       case NXP_NCI_MODE_COLD:
+               r = -EREMOTEIO;
+               break;
+       }
+
+       if (r == -EREMOTEIO) {
+               phy->hard_fault = r;
+               skb = NULL;
+       } else if (r < 0) {
+               nfc_err(&client->dev, "Read failed with error %d\n", r);
+               goto exit_irq_handled;
+       }
+
+       switch (info->mode) {
+       case NXP_NCI_MODE_NCI:
+               nci_recv_frame(phy->ndev, skb);
+               break;
+       case NXP_NCI_MODE_FW:
+               nxp_nci_fw_recv_frame(phy->ndev, skb);
+               break;
+       case NXP_NCI_MODE_COLD:
+               break;
+       }
+
+exit_irq_handled:
+       mutex_unlock(&info->info_lock);
+       return IRQ_HANDLED;
+exit_irq_none:
+       WARN_ON_ONCE(1);
+       return IRQ_NONE;
+}
+
+#ifdef CONFIG_OF
+
+static int nxp_nci_i2c_parse_devtree(struct i2c_client *client)
+{
+       struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
+       struct device_node *pp;
+       int r;
+
+       pp = client->dev.of_node;
+       if (!pp)
+               return -ENODEV;
+
+       r = of_get_named_gpio(pp, "enable-gpios", 0);
+       if (r == -EPROBE_DEFER)
+               r = of_get_named_gpio(pp, "enable-gpios", 0);
+       if (r < 0) {
+               nfc_err(&client->dev, "Failed to get EN gpio, error: %d\n", r);
+               return r;
+       }
+       phy->gpio_en = r;
+
+       r = of_get_named_gpio(pp, "firmware-gpios", 0);
+       if (r == -EPROBE_DEFER)
+               r = of_get_named_gpio(pp, "firmware-gpios", 0);
+       if (r < 0) {
+               nfc_err(&client->dev, "Failed to get FW gpio, error: %d\n", r);
+               return r;
+       }
+       phy->gpio_fw = r;
+
+       r = irq_of_parse_and_map(pp, 0);
+       if (r < 0) {
+               nfc_err(&client->dev, "Unable to get irq, error: %d\n", r);
+               return r;
+       }
+       client->irq = r;
+
+       return 0;
+}
+
+#else
+
+static int nxp_nci_i2c_parse_devtree(struct i2c_client *client)
+{
+       return -ENODEV;
+}
+
+#endif
+
+static int nxp_nci_i2c_probe(struct i2c_client *client,
+                           const struct i2c_device_id *id)
+{
+       struct nxp_nci_i2c_phy *phy;
+       struct nxp_nci_nfc_platform_data *pdata;
+       int r;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+               r = -ENODEV;
+               goto probe_exit;
+       }
+
+       phy = devm_kzalloc(&client->dev, sizeof(struct nxp_nci_i2c_phy),
+                          GFP_KERNEL);
+       if (!phy) {
+               r = -ENOMEM;
+               goto probe_exit;
+       }
+
+       phy->i2c_dev = client;
+       i2c_set_clientdata(client, phy);
+
+       pdata = client->dev.platform_data;
+
+       if (!pdata && client->dev.of_node) {
+               r = nxp_nci_i2c_parse_devtree(client);
+               if (r < 0) {
+                       nfc_err(&client->dev, "Failed to get DT data\n");
+                       goto probe_exit;
+               }
+       } else if (pdata) {
+               phy->gpio_en = pdata->gpio_en;
+               phy->gpio_fw = pdata->gpio_fw;
+               client->irq = pdata->irq;
+       } else {
+               nfc_err(&client->dev, "No platform data\n");
+               r = -EINVAL;
+               goto probe_exit;
+       }
+
+       r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
+                                 GPIOF_OUT_INIT_LOW, "nxp_nci_en");
+       if (r < 0)
+               goto probe_exit;
+
+       r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw,
+                                 GPIOF_OUT_INIT_LOW, "nxp_nci_fw");
+       if (r < 0)
+               goto probe_exit;
+
+       r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops,
+                         NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev);
+       if (r < 0)
+               goto probe_exit;
+
+       r = request_threaded_irq(client->irq, NULL,
+                                nxp_nci_i2c_irq_thread_fn,
+                                IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                NXP_NCI_I2C_DRIVER_NAME, phy);
+       if (r < 0)
+               nfc_err(&client->dev, "Unable to register IRQ handler\n");
+
+probe_exit:
+       return r;
+}
+
+static int nxp_nci_i2c_remove(struct i2c_client *client)
+{
+       struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
+
+       nxp_nci_remove(phy->ndev);
+       free_irq(client->irq, phy);
+
+       return 0;
+}
+
+static struct i2c_device_id nxp_nci_i2c_id_table[] = {
+       {"nxp-nci_i2c", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table);
+
+static const struct of_device_id of_nxp_nci_i2c_match[] = {
+       { .compatible = "nxp,nxp-nci-i2c", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
+
+static struct i2c_driver nxp_nci_i2c_driver = {
+       .driver = {
+                  .name = NXP_NCI_I2C_DRIVER_NAME,
+                  .owner  = THIS_MODULE,
+                  .of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
+                 },
+       .probe = nxp_nci_i2c_probe,
+       .id_table = nxp_nci_i2c_id_table,
+       .remove = nxp_nci_i2c_remove,
+};
+
+module_i2c_driver(nxp_nci_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for NXP NCI NFC controllers");
+MODULE_AUTHOR("Clément Perrochaud <clement.perrochaud@nxp.com>");
diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h
new file mode 100644 (file)
index 0000000..f1fecc4
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014  NXP Semiconductors  All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __LOCAL_NXP_NCI_H_
+#define __LOCAL_NXP_NCI_H_
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/nxp-nci.h>
+
+#include <net/nfc/nci_core.h>
+
+#define NXP_NCI_FW_HDR_LEN     2
+#define NXP_NCI_FW_CRC_LEN     2
+
+#define NXP_NCI_FW_FRAME_LEN_MASK      0x03FF
+
+enum nxp_nci_mode {
+       NXP_NCI_MODE_COLD,
+       NXP_NCI_MODE_NCI,
+       NXP_NCI_MODE_FW
+};
+
+struct nxp_nci_phy_ops {
+       int (*set_mode)(void *id, enum nxp_nci_mode mode);
+       int (*write)(void *id, struct sk_buff *skb);
+};
+
+struct nxp_nci_fw_info {
+       char name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+       const struct firmware *fw;
+
+       size_t size;
+       size_t written;
+
+       const u8 *data;
+       size_t frame_size;
+
+       struct work_struct work;
+       struct completion cmd_completion;
+
+       int cmd_result;
+};
+
+struct nxp_nci_info {
+       struct nci_dev *ndev;
+       void *phy_id;
+       struct device *pdev;
+
+       enum nxp_nci_mode mode;
+
+       struct nxp_nci_phy_ops *phy_ops;
+       unsigned int max_payload;
+
+       struct mutex info_lock;
+
+       struct nxp_nci_fw_info fw_info;
+};
+
+int nxp_nci_fw_download(struct nci_dev *ndev, const char *firmware_name);
+void nxp_nci_fw_work(struct work_struct *work);
+void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+void nxp_nci_fw_work_complete(struct nxp_nci_info *info, int result);
+
+int nxp_nci_probe(void *phy_id, struct device *pdev,
+                 struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload,
+                 struct nci_dev **ndev);
+void nxp_nci_remove(struct nci_dev *ndev);
+
+#endif /* __LOCAL_NXP_NCI_H_ */
index d46a700a9637a9152aee5bc5c41b58eacfbe8ec5..a03e4eb5fe2978c24d10899d249ddc2373968db6 100644 (file)
@@ -1820,7 +1820,7 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
        if (IS_ERR(resp)) {
                rc = PTR_ERR(resp);
 
-               nfc_err(&dev->interface->dev, "RF setting error %d", rc);
+               nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
 
                return rc;
        }
@@ -2554,8 +2554,10 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
        }
 
        skb = pn533_build_response(dev);
-       if (!skb)
+       if (!skb) {
+               rc = -ENOMEM;
                goto error;
+       }
 
        arg->cb(arg->cb_context, skb, 0);
        kfree(arg);
index cdde745b96bd142e07a90b441447ed57bd94b087..6fd986f5ac3ed522e46a1c226ba418f4576b1711 100644 (file)
@@ -953,7 +953,7 @@ static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client)
        }
 
        nfc_info(dev, "GPIO resource, no:%d irq:%d\n",
-                       desc_to_gpio(gpiod_irq), ret);
+                desc_to_gpio(gpiod_irq), ret);
        client->irq = ret;
 
        return 0;
@@ -1062,11 +1062,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
 
        phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
                           GFP_KERNEL);
-       if (!phy) {
-               nfc_err(&client->dev,
-                       "Cannot allocate memory for pn544 i2c phy.\n");
+       if (!phy)
                return -ENOMEM;
-       }
 
        INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work);
        phy->fw_work_state = FW_WORK_STATE_IDLE;
index 4ac4d31f6c598309a9c9e35f871cd0316419ba5f..87d509996704039c3ac224dcb95bb4b096570542 100644 (file)
@@ -604,11 +604,11 @@ static void port100_recv_response(struct urb *urb)
        case -ECONNRESET:
        case -ENOENT:
                nfc_err(&dev->interface->dev,
-                       "The urb has been canceled (status %d)", urb->status);
+                       "The urb has been canceled (status %d)\n", urb->status);
                goto sched_wq;
        case -ESHUTDOWN:
        default:
-               nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
                        urb->status);
                goto sched_wq;
        }
@@ -616,7 +616,7 @@ static void port100_recv_response(struct urb *urb)
        in_frame = dev->in_urb->transfer_buffer;
 
        if (!port100_rx_frame_is_valid(in_frame)) {
-               nfc_err(&dev->interface->dev, "Received an invalid frame");
+               nfc_err(&dev->interface->dev, "Received an invalid frame\n");
                cmd->status = -EIO;
                goto sched_wq;
        }
@@ -626,7 +626,7 @@ static void port100_recv_response(struct urb *urb)
 
        if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
                nfc_err(&dev->interface->dev,
-                       "It's not the response to the last command");
+                       "It's not the response to the last command\n");
                cmd->status = -EIO;
                goto sched_wq;
        }
@@ -657,11 +657,11 @@ static void port100_recv_ack(struct urb *urb)
        case -ECONNRESET:
        case -ENOENT:
                nfc_err(&dev->interface->dev,
-                       "The urb has been stopped (status %d)", urb->status);
+                       "The urb has been stopped (status %d)\n", urb->status);
                goto sched_wq;
        case -ESHUTDOWN:
        default:
-               nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
                        urb->status);
                goto sched_wq;
        }
@@ -669,7 +669,7 @@ static void port100_recv_ack(struct urb *urb)
        in_frame = dev->in_urb->transfer_buffer;
 
        if (!port100_rx_frame_is_ack(in_frame)) {
-               nfc_err(&dev->interface->dev, "Received an invalid ack");
+               nfc_err(&dev->interface->dev, "Received an invalid ack\n");
                cmd->status = -EIO;
                goto sched_wq;
        }
@@ -677,7 +677,7 @@ static void port100_recv_ack(struct urb *urb)
        rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
        if (rc) {
                nfc_err(&dev->interface->dev,
-                       "usb_submit_urb failed with result %d", rc);
+                       "usb_submit_urb failed with result %d\n", rc);
                cmd->status = rc;
                goto sched_wq;
        }
@@ -873,11 +873,11 @@ static void port100_send_complete(struct urb *urb)
        case -ECONNRESET:
        case -ENOENT:
                nfc_err(&dev->interface->dev,
-                       "The urb has been stopped (status %d)", urb->status);
+                       "The urb has been stopped (status %d)\n", urb->status);
                break;
        case -ESHUTDOWN:
        default:
-               nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
                        urb->status);
        }
 }
@@ -1094,7 +1094,7 @@ static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
 
        if (resp->len < 4) {
                nfc_err(&dev->interface->dev,
-                       "Invalid packet length received.\n");
+                       "Invalid packet length received\n");
                rc = -EIO;
                goto error;
        }
@@ -1250,7 +1250,7 @@ static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated)
                       PORT100_MDAA_TGT_WAS_ACTIVATED_MASK;
                break;
        default:
-               nfc_err(&dev->interface->dev, "Unknonwn command type.\n");
+               nfc_err(&dev->interface->dev, "Unknown command type\n");
                return false;
        }
 
@@ -1481,7 +1481,7 @@ static int port100_probe(struct usb_interface *interface,
        cmd_type_mask = port100_get_command_type_mask(dev);
        if (!cmd_type_mask) {
                nfc_err(&interface->dev,
-                       "Could not get supported command types.\n");
+                       "Could not get supported command types\n");
                rc = -ENODEV;
                goto error;
        }
@@ -1494,7 +1494,7 @@ static int port100_probe(struct usb_interface *interface,
        rc = port100_set_command_type(dev, dev->cmd_type);
        if (rc) {
                nfc_err(&interface->dev,
-                       "The device does not support command type %u.\n",
+                       "The device does not support command type %u\n",
                        dev->cmd_type);
                goto error;
        }
@@ -1502,7 +1502,7 @@ static int port100_probe(struct usb_interface *interface,
        fw_version = port100_get_firmware_version(dev);
        if (!fw_version)
                nfc_err(&interface->dev,
-                       "Could not get device firmware version.\n");
+                       "Could not get device firmware version\n");
 
        nfc_info(&interface->dev,
                 "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n",
@@ -1515,7 +1515,7 @@ static int port100_probe(struct usb_interface *interface,
                                                           dev->skb_tailroom);
        if (!dev->nfc_digital_dev) {
                nfc_err(&interface->dev,
-                       "Could not allocate nfc_digital_dev.\n");
+                       "Could not allocate nfc_digital_dev\n");
                rc = -ENOMEM;
                goto error;
        }
@@ -1526,7 +1526,7 @@ static int port100_probe(struct usb_interface *interface,
        rc = nfc_digital_register_device(dev->nfc_digital_dev);
        if (rc) {
                nfc_err(&interface->dev,
-                       "Could not register digital device.\n");
+                       "Could not register digital device\n");
                goto free_nfc_dev;
        }
 
@@ -1562,7 +1562,7 @@ static void port100_disconnect(struct usb_interface *interface)
 
        kfree(dev->cmd);
 
-       nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected");
+       nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected\n");
 }
 
 static struct usb_driver port100_driver = {
index 24d3d240d5f42c83484925ed110f3a9dbf74ffcf..d251f7229c4e8fe5aa9da16baf09e75ed0a5b412 100644 (file)
@@ -572,7 +572,7 @@ exit:
        return r;
 }
 
-static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
+static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *uid,
                                       int *len)
 {
        int r;
@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
                goto exit;
        }
 
-       gate = uid_skb->data;
+       memcpy(uid, uid_skb->data, uid_skb->len);
        *len = uid_skb->len;
 exit:
        kfree_skb(uid_skb);
index bd13cac9c66ac711cda7c1c3d0b8e5e3cb0b2ccd..3197e9bb66f73c53d940a3292b2140a2e7e72171 100644 (file)
@@ -310,6 +310,13 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
        case ST21NFCA_EVT_CONNECTIVITY:
                break;
        case ST21NFCA_EVT_TRANSACTION:
+               /*
+                * According to specification etsi 102 622
+                * 11.2.2.4 EVT_TRANSACTION Table 52
+                * Description  Tag     Length
+                * AID          81      5 to 16
+                * PARAMETERS   82      0 to 255
+                */
                if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
                    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
                        return -EPROTO;
@@ -318,8 +325,10 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
                                                   skb->len - 2, GFP_KERNEL);
 
                transaction->aid_len = skb->data[1];
-               memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+               memcpy(transaction->aid, &skb->data[2],
+                      transaction->aid_len);
 
+               /* Check next byte is PARAMETERS tag (82) */
                if (skb->data[transaction->aid_len + 2] !=
                    NFC_EVT_TRANSACTION_PARAMS_TAG)
                        return -EPROTO;
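
The st21nfca and st21nfcb EVT_TRANSACTION hunks in this series parse the small TLV layout quoted from ETSI TS 102 622 (an AID TLV with tag 81 and length 5 to 16, immediately followed by a PARAMETERS TLV with tag 82). As a rough standalone illustration of that layout — not the drivers' actual code, and with the struct and constants invented for the example — a parser might look like this:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define EVT_AID_TAG     0x81    /* ETSI TS 102 622, 11.2.2.4, table 52 */
#define EVT_PARAMS_TAG  0x82
#define AID_MIN_LEN     5
#define AID_MAX_LEN     16

struct evt_transaction {                /* hypothetical container for the example */
        uint8_t aid_len;
        uint8_t aid[AID_MAX_LEN];
        uint8_t params_len;
        const uint8_t *params;
};

/* Parse "81 <aid_len> <aid...> 82 <params_len> <params...>"; returns 0 on
 * success, -1 on a malformed event. */
static int parse_evt_transaction(const uint8_t *buf, size_t len,
                                 struct evt_transaction *t)
{
        if (len < 2 || buf[0] != EVT_AID_TAG)
                return -1;
        t->aid_len = buf[1];
        if (t->aid_len < AID_MIN_LEN || t->aid_len > AID_MAX_LEN ||
            len < (size_t)t->aid_len + 4)
                return -1;
        memcpy(t->aid, &buf[2], t->aid_len);

        if (buf[2 + t->aid_len] != EVT_PARAMS_TAG)      /* PARAMETERS must follow */
                return -1;
        t->params_len = buf[3 + t->aid_len];
        if (len < (size_t)t->aid_len + 4 + t->params_len)
                return -1;
        t->params = &buf[4 + t->aid_len];
        return 0;
}
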
index eb886932d97278cfa044fb66a20dea49ffcaca6b..76a4cad41cec9b3affa4f903683a313d4620b389 100644 (file)
@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
                return phy->ndlc->hard_fault;
 
        r = i2c_master_send(client, skb->data, skb->len);
-       if (r == -EREMOTEIO) {  /* Retry, chip was in standby */
+       if (r < 0) {  /* Retry, chip was in standby */
                usleep_range(1000, 4000);
                r = i2c_master_send(client, skb->data, skb->len);
        }
@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
        struct i2c_client *client = phy->i2c_dev;
 
        r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
-       if (r == -EREMOTEIO) {  /* Retry, chip was in standby */
+       if (r < 0) {  /* Retry, chip was in standby */
                usleep_range(1000, 4000);
                r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
        }
@@ -313,11 +313,8 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
 
        phy = devm_kzalloc(&client->dev, sizeof(struct st21nfcb_i2c_phy),
                           GFP_KERNEL);
-       if (!phy) {
-               nfc_err(&client->dev,
-                       "Cannot allocate memory for st21nfcb i2c phy.\n");
+       if (!phy)
                return -ENOMEM;
-       }
 
        phy->i2c_dev = client;
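
Both st21nfcb I2C hunks above widen the retry condition from -EREMOTEIO to any negative return, since a chip waking from standby can fail the first transfer with more than one error code. A minimal sketch of that retry-once-after-a-short-delay pattern, factored into a helper (the helper itself is an assumption, not something the driver defines):

#include <linux/i2c.h>
#include <linux/delay.h>

/* Retry a single I2C write once after a short delay; the device may NAK
 * or otherwise fail the first access while it is still waking from standby. */
static int i2c_send_retry(struct i2c_client *client, const char *buf, int len)
{
        int r = i2c_master_send(client, buf, len);

        if (r < 0) {
                usleep_range(1000, 4000);       /* give the chip time to wake up */
                r = i2c_master_send(client, buf, len);
        }
        return r;
}
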
 
index 5fbf59d2138c1b0a0c723550905d37931a89bcbe..6014b585946595dc9afa1f51b5f24553d2a52e16 100644 (file)
@@ -256,10 +256,9 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
        struct llt_ndlc *ndlc;
 
        ndlc = devm_kzalloc(dev, sizeof(struct llt_ndlc), GFP_KERNEL);
-       if (!ndlc) {
-               nfc_err(dev, "Cannot allocate memory for ndlc.\n");
+       if (!ndlc)
                return -ENOMEM;
-       }
+
        ndlc->ops = phy_ops;
        ndlc->phy_id = phy_id;
        ndlc->dev = dev;
index 7c82e9d87a65d718a92b7066e6b06c3b233e5c60..24862a525fb5044bbb3d18c8ea593e82b6f2c9bb 100644 (file)
@@ -321,6 +321,12 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
 
        break;
        case ST21NFCB_EVT_TRANSACTION:
+               /* According to specification etsi 102 622
+                * 11.2.2.4 EVT_TRANSACTION Table 52
+                * Description  Tag     Length
+                * AID          81      5 to 16
+                * PARAMETERS   82      0 to 255
+                */
                if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
                    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
                        return -EPROTO;
@@ -329,8 +335,9 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
                                            skb->len - 2, GFP_KERNEL);
 
                transaction->aid_len = skb->data[1];
-               memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+               memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
 
+               /* Check next byte is PARAMETERS tag (82) */
                if (skb->data[transaction->aid_len + 2] !=
                    NFC_EVT_TRANSACTION_PARAMS_TAG)
                        return -EPROTO;
@@ -340,6 +347,7 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
                       transaction->aid_len + 4, transaction->params_len);
 
                r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
+               break;
        default:
                return 1;
        }
@@ -542,14 +550,12 @@ static int st21nfcb_hci_network_init(struct nci_dev *ndev)
 
        r = nci_hci_dev_session_init(ndev);
        if (r != NCI_HCI_ANY_OK)
-               goto exit;
+               goto free_dest_params;
 
        r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
                               NCI_NFCEE_ENABLE);
        if (r != NCI_STATUS_OK)
-               goto exit;
-
-       return 0;
+               goto free_dest_params;
 
 free_dest_params:
        kfree(dest_params);
index ad2906919d4589f4edbc0cd599a2fe7aa2938922..78a7dcbec7d8990ac37adad938a0aff3420423e2 100644 (file)
@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
        return NULL;
 }
 
-static int of_empty_ranges_quirk(void)
+static int of_empty_ranges_quirk(struct device_node *np)
 {
        if (IS_ENABLED(CONFIG_PPC)) {
-               /* To save cycles, we cache the result */
+               /* To save cycles, we cache the result for global "Mac" setting */
                static int quirk_state = -1;
 
+               /* PA-SEMI sdc DT bug */
+               if (of_device_is_compatible(np, "1682m-sdc"))
+                       return true;
+
+               /* Make quirk cached */
                if (quirk_state < 0)
                        quirk_state =
                                of_machine_is_compatible("Power Macintosh") ||
@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
         * This code is only enabled on powerpc. --gcl
         */
        ranges = of_get_property(parent, rprop, &rlen);
-       if (ranges == NULL && !of_empty_ranges_quirk()) {
+       if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
                pr_debug("OF: no ranges; cannot translate\n");
                return 1;
        }
index 9205f433573cc124bd84701f6ee6324969b093e9..18198316b6cf15c406c6f9eb243bc39601b97979 100644 (file)
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
        if (!pmic)
                return -ENOMEM;
 
+       if (of_device_is_compatible(node, "ti,tps659038-pmic"))
+               palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
+                                                       TPS659038_REGEN2_CTRL;
+
        pmic->dev = &pdev->dev;
        pmic->palmas = palmas;
        palmas->pmic = pmic;
index e2436d140175a109907e55eacafeb1906105748d..3a6fd3a8a2ec63d389a2f4dbf948f5c2090ecc3a 100644 (file)
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
        mrst->dev = NULL;
 }
 
-#ifdef CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int mrst_suspend(struct device *dev)
 {
        struct mrst_rtc *mrst = dev_get_drvdata(dev);
        unsigned char   tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
  */
 static inline int mrst_poweroff(struct device *dev)
 {
-       return mrst_suspend(dev, PMSG_HIBERNATE);
+       return mrst_suspend(dev);
 }
 
 static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
+#define MRST_PM_OPS (&mrst_pm_ops)
+
 #else
-#define        mrst_suspend    NULL
-#define        mrst_resume     NULL
+#define MRST_PM_OPS NULL
 
 static inline int mrst_poweroff(struct device *dev)
 {
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
        .remove         = vrtc_mrst_platform_remove,
        .shutdown       = vrtc_mrst_platform_shutdown,
        .driver = {
-               .name           = (char *) driver_name,
-               .suspend        = mrst_suspend,
-               .resume         = mrst_resume,
+               .name   = driver_name,
+               .pm     = MRST_PM_OPS,
        }
 };
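
The rtc-mrst hunks above convert the driver from the legacy struct device_driver .suspend/.resume callbacks (which take a pm_message_t) to dev_pm_ops built with SIMPLE_DEV_PM_OPS. For a hypothetical platform driver the same shape looks like the sketch below; the foo_* names are placeholders, not anything in this series:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        /* quiesce the hardware, save volatile state */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* restore state, re-enable the hardware */
        return 0;
}

/* Generates foo_pm_ops; the sleep hooks are filled in only when
 * CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                .pm   = &foo_pm_ops,
        },
};
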
 
index 9219953ee949a9dfaf1ff0f044e41a3e5c13adc7..d9afc51af7d3dbc7df2e38865596ba3473a1f708 100644 (file)
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-       .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+       .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+                         ATA_FLAG_SAS_HOST,
        .pio_mask       = ATA_PIO4_ONLY,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA6,
index 932d9cc98d2fc807c4ef0e7715fe922803bbc29c..9c706d8c144174dae8cf87c1aa62a95a98f961e4 100644 (file)
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-       .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+       .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+                ATA_FLAG_SAS_HOST,
        .pio_mask = ATA_PIO4,
        .mwdma_mask = ATA_MWDMA2,
        .udma_mask = ATA_UDMA6,
index 3ce39d10fafbc270d47a17107b7cc5ce257dca94..4f8c798e0633a81483c3b05af46805b1848ba386 100644 (file)
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
 {
        struct dw_spi *dws = arg;
 
-       if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
+       clear_bit(TX_BUSY, &dws->dma_chan_busy);
+       if (test_bit(RX_BUSY, &dws->dma_chan_busy))
                return;
        dw_spi_xfer_done(dws);
 }
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
 {
        struct dw_spi *dws = arg;
 
-       if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+       clear_bit(RX_BUSY, &dws->dma_chan_busy);
+       if (test_bit(TX_BUSY, &dws->dma_chan_busy))
                return;
        dw_spi_xfer_done(dws);
 }
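
The dw_spi hunks above fix a subtle bit-test: test_and_clear_bit() returns the previous state of the bit being cleared (0 or 1), so masking that return value with BIT(RX_BUSY) or BIT(TX_BUSY) never says anything about whether the other channel is still busy. The replacement clears this channel's flag and then tests the other one. The intent, written out as a shared helper (illustrative only, not the driver's code), is:

/* The transfer is complete only once both the TX and RX halves have
 * finished; each completion callback clears its own flag and only calls
 * the done handler if the other flag is already clear. */
static void dw_spi_dma_half_done(struct dw_spi *dws, int my_bit, int other_bit)
{
        clear_bit(my_bit, &dws->dma_chan_busy);
        if (test_bit(other_bit, &dws->dma_chan_busy))
                return;                 /* the other channel is still running */
        dw_spi_xfer_done(dws);
}
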
index ff9cdbdb6672371df54b6c59ce54579a46758602..2b2c359f5a501da32a38af38cfe0c0956b0e23c9 100644 (file)
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
        struct resource *res;
        struct device *dev;
        void __iomem *base;
-       u32 max_freq, iomode;
+       u32 max_freq, iomode, num_cs;
        int ret, irq, size;
 
        dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
        }
 
        /* use num-cs unless not present or out of range */
-       if (of_property_read_u16(dev->of_node, "num-cs",
-                       &master->num_chipselect) ||
-                       (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+       if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+           num_cs > SPI_NUM_CHIPSELECTS)
                master->num_chipselect = SPI_NUM_CHIPSELECTS;
+       else
+               master->num_chipselect = num_cs;
 
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
index c64a3e59fce30a7f9658afcca246a6d6872627da..57a195041dc72e019a02b3c7c4b6f4c2a6fac59b 100644 (file)
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
                                "failed to unprepare message: %d\n", ret);
                }
        }
+
+       trace_spi_message_done(mesg);
+
        master->cur_msg_prepared = false;
 
        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
-
-       trace_spi_message_done(mesg);
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
 
index d75b72ba2672b74ccfc50f8f8a5c67dc17db0ea9..15a7ee3859dd7dd74aee31593876b422093eff04 100644 (file)
@@ -357,6 +357,15 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
        pcicore_write32(pc, SSB_PCICORE_SBTOPCI2,
                        SSB_PCICORE_SBTOPCI_MEM | SSB_PCI_DMA);
 
+       /*
+        * Accessing PCI config without a proper delay after devices reset (not
+        * GPIO reset) was causing reboots on WRT300N v1.0.
+        * A tested delay of 850 us lowered the reboot chance to 50-80%; 1000 us
+        * fixed it completely. Flushing all writes was also tested, with no luck.
+        */
+       if (pc->dev->bus->chip_id == 0x4704)
+               usleep_range(1000, 2000);
+
        /* Enable PCI bridge BAR0 prefetch and burst */
        val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
        ssb_extpci_write_config(pc, 0, 0, 0, PCI_COMMAND, &val, 2);
index 24183028bd712b11af46cd4531f60b33b4e57338..6d5b38d6957852ee81caa09cb6e3c047c55d26b8 100644 (file)
@@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
 config IIO_SIMPLE_DUMMY_BUFFER
        bool "Buffered capture support"
        select IIO_BUFFER
+       select IIO_TRIGGER
        select IIO_KFIFO_BUF
        help
          Add buffered data capture to the simple dummy driver.
index fd171d8b38fbcc444f3e7118bb66b2d51bdf698c..90cc18b703cf67ae6c089c36d51d8ebde6f28106 100644 (file)
@@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
        mutex_init(&data->lock);
 
        indio_dev->dev.parent = dev;
+       indio_dev->name = dev->driver->name;
        indio_dev->info = &hmc5843_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = data->variant->channels;
index b1893f3f88f1c6b6fdb2e8e42ad565ba23e18fd1..3ad1458bfeb0fc32afe790b2aa0f3e36961b7b45 100644 (file)
@@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
        writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
                        sport->port.membase + UARTPFIFO);
 
+       /* explicitly clear RDRF */
+       readb(sport->port.membase + UARTSR1);
+
        /* flush Tx and Rx FIFO */
        writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
                        sport->port.membase + UARTCFIFO);
@@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port)
        sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
+       sport->port.fifosize = sport->txfifo_size;
+
        sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
index af821a9087204ec654f637a617513ba06d68a0ef..cf08876922f1446e55a2d8ca79bf87e0d4e24fed 100644 (file)
@@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
                        free_irq(ourport->tx_irq, ourport);
                tx_enabled(port) = 0;
                ourport->tx_claimed = 0;
+               ourport->tx_mode = 0;
        }
 
        if (ourport->rx_claimed) {
index a7865c4b04980898b49317386ad6138aab051bc5..0827d7c965276382418f0a602ec5c1412c023142 100644 (file)
@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
                status = PORT_PLC;
                port_change_bit = "link state";
                break;
+       case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+               status = PORT_CEC;
+               port_change_bit = "config error";
+               break;
        default:
                /* Should never happen */
                return;
@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        status |= USB_PORT_STAT_C_LINK_STATE << 16;
                if ((raw_port_status & PORT_WRC))
                        status |= USB_PORT_STAT_C_BH_RESET << 16;
+               if ((raw_port_status & PORT_CEC))
+                       status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
        }
 
        if (hcd->speed != HCD_USB3) {
@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_C_OVER_CURRENT:
                case USB_PORT_FEAT_C_ENABLE:
                case USB_PORT_FEAT_C_PORT_LINK_STATE:
+               case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
                        xhci_clear_port_change_bit(xhci, wValue, wIndex,
                                        port_array[wIndex], temp);
                        break;
@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
         */
        status = bus_state->resuming_ports;
 
-       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
+       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
 
        spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
index fd53c9ebd662a5fb4593c99ce5dd7c2552ad83c5..2af32e26fafc3727279fe656fbbcaf158736371d 100644 (file)
@@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
+               xhci->quirks |= XHCI_AVOID_BEI;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                        pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 * PPT chipsets.
                 */
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
-               xhci->quirks |= XHCI_AVOID_BEI;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
index f32c292cc8689d81bfce947f7c587603e95c09be..3fc4fe7702533b785bc22ead9fe17939e1f365f8 100644 (file)
@@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
 
        if (udc->driver) {
                dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
-               spin_unlock(&udc->lock);
+               spin_unlock_irqrestore(&udc->lock, flags);
                return -EBUSY;
        }
 
index 3086dec0ef53bbd5d5d3d21087b91469983492fb..8eb68a31cab6c4021617ca555cd58b086872c112 100644 (file)
@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        /*
         * ELV devices:
         */
@@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
 {
        struct usb_device *udev = serial->dev;
 
-       if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
-           (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+       if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
+               return ftdi_jtag_probe(serial);
+
+       if (udev->product &&
+               (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+                !strcmp(udev->product, "SNAP Connect E10")))
                return ftdi_jtag_probe(serial);
 
        return 0;
index 56b1b55c4751696b2e89633ea90bfe1c2940c436..4e4f46f3c89c025670d42860756f39b2bb62ae24 100644 (file)
  */
 #define FTDI_NT_ORIONLXM_PID   0x7c90  /* OrionLXm Substation Automation Platform */
 
+/*
+ * Synapse Wireless product ids (FTDI_VID)
+ * http://www.synapse-wireless.com
+ */
+#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+
 
 /********************************/
 /** third-party VID/PID combos **/
index dd97d8b572c336e03c7c4c282fbe7e38c44646a4..4f7e072e4e001e9f7c439114f13b52a77d3250d6 100644 (file)
@@ -61,6 +61,7 @@ struct keyspan_pda_private {
 /* For Xircom PGSDB9 and older Entrega version of the same device */
 #define XIRCOM_VENDOR_ID               0x085a
 #define XIRCOM_FAKE_ID                 0x8027
+#define XIRCOM_FAKE_ID_2               0x8025 /* "PGMFHUB" serial */
 #define ENTREGA_VENDOR_ID              0x1645
 #define ENTREGA_FAKE_ID                        0x8093
 
@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
 #endif
 #ifdef XIRCOM
        { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+       { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
        { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
 #endif
        { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
 #ifdef XIRCOM
 static const struct usb_device_id id_table_fake_xircom[] = {
        { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+       { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
        { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
        { }
 };
index c8def68d9e4cf30c391fabc36981f96b4cb9d72a..0deaa4f971f5ff1fdfde6d063314a45bd99b9707 100644 (file)
 #define PDC_WDT_MIN_TIMEOUT            1
 #define PDC_WDT_DEF_TIMEOUT            64
 
-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
-       "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+       "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
        pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
        pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
        pdc_wdt->wdt_dev.parent = &pdev->dev;
+       watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
        ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
        if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
        watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
 
        platform_set_drvdata(pdev, pdc_wdt);
-       watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
        ret = watchdog_register_device(&pdc_wdt->wdt_dev);
        if (ret)
index a87f6df6e85f32993db3907d66f3066c8820a91d..938b987de551bdea7615a701007d0125f9a10d8b 100644 (file)
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
        u32 reg;
        struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
        void __iomem *wdt_base = mtk_wdt->wdt_base;
-       u32 ret;
+       int ret;
 
        ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
        if (ret < 0)
index b812462083fcaf8d32c215327d28b7c4b7e6e4da..94d96809e686b9ffd1f83c76681c8c3ff0a7cf92 100644 (file)
@@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG
 
          In that case step 3 should be omitted.
 
+config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+       int "Hotplugged memory limit (in GiB) for a PV guest"
+       default 512 if X86_64
+       default 4 if X86_32
+       range 0 64 if X86_32
+       depends on XEN_HAVE_PVMMU
+       depends on XEN_BALLOON_MEMORY_HOTPLUG
+       help
+         Maximum amount of memory (in GiB) that a PV guest can be
+         expanded to when using memory hotplug.
+
+         A PV guest can have more memory than this limit if it is
+         started with a larger maximum.
+
+         This value is used to allocate enough space in internal
+         tables needed for physical memory administration.
+
 config XEN_SCRUB_PAGES
        bool "Scrub pages before returning them to system"
        depends on XEN_BALLOON
index 0b52d92cb2e52d03899dd410d8a58e7640c8d8d5..fd933695f2328f29c2493ee751f22230ec68cbb1 100644 (file)
@@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit)
        balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
        nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
+        /*
+         * add_memory() will build page tables for the new memory so
+         * the p2m must contain invalid entries so the correct
+         * non-present PTEs will be written.
+         *
+         * If a failure occurs, the original (identity) p2m entries
+         * are not restored since this region is now known not to
+         * conflict with any devices.
+         */ 
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               unsigned long pfn, i;
+
+               pfn = PFN_DOWN(hotplug_start_paddr);
+               for (i = 0; i < balloon_hotplug; i++) {
+                       if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+                               pr_warn("set_phys_to_machine() failed, no memory added\n");
+                               return BP_ECANCELED;
+                       }
+                }
+       }
+#endif
+
        rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
 
        if (rc) {
index d2468bf95669850520a2a0dc852b1185c5234c7b..a91795e01a7ff0c0e85abf1bdf69f3d1d828b231 100644 (file)
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
        boff = tmp % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
+               if (IS_ERR(bh)) {
+                       written = PTR_ERR(bh);
+                       goto err_first_bh;
+               }
                tmp = min(bsize - boff, to - from);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
+               if (IS_ERR(bh)) {
+                       written = PTR_ERR(bh);
+                       goto err_first_bh;
+               }
        }
        while (from + bsize <= to) {
                prev_bh = bh;
                bh = affs_getemptyblk_ino(inode, bidx);
                if (IS_ERR(bh))
-                       goto out;
+                       goto err_bh;
                memcpy(AFFS_DATA(bh), data + from, bsize);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                prev_bh = bh;
                bh = affs_bread_ino(inode, bidx, 1);
                if (IS_ERR(bh))
-                       goto out;
+                       goto err_bh;
                tmp = min(bsize, to - from);
                BUG_ON(tmp > bsize);
                memcpy(AFFS_DATA(bh), data + from, tmp);
@@ -790,12 +794,13 @@ done:
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;
 
+err_first_bh:
        unlock_page(page);
        page_cache_release(page);
 
        return written;
 
-out:
+err_bh:
        bh = prev_bh;
        if (!written)
                written = PTR_ERR(bh);
index 4ac7445e6ec70516848e942f54a6846a8541113b..aa0dc2573374184597b9e449fad6467f924f0f45 100644 (file)
@@ -1,6 +1,9 @@
 /*
  *   fs/cifs/cifsencrypt.c
  *
+ *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
+ *   for more detailed information
+ *
  *   Copyright (C) International Business Machines  Corp., 2005,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
@@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                                 __func__);
                        return rc;
                }
-       } else if (ses->serverName) {
+       } else {
+               /* We use ses->serverName if no domain name available */
                len = strlen(ses->serverName);
 
                server = kmalloc(2 + (len * 2), GFP_KERNEL);
index d3aa999ab78520fcd4819f99548247e231df591b..480cf9c81d505b8351dd76eee0f012110c3f2b9c 100644 (file)
@@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                pr_warn("CIFS: username too long\n");
                                goto cifs_parse_mount_err;
                        }
+
+                       kfree(vol->username);
                        vol->username = kstrdup(string, GFP_KERNEL);
                        if (!vol->username)
                                goto cifs_parse_mount_err;
@@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
 
+                       kfree(vol->domainname);
                        vol->domainname = kstrdup(string, GFP_KERNEL);
                        if (!vol->domainname) {
                                pr_warn("CIFS: no memory for domainname\n");
@@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        }
 
                         if (strncasecmp(string, "default", 7) != 0) {
+                               kfree(vol->iocharset);
                                vol->iocharset = kstrdup(string,
                                                         GFP_KERNEL);
                                if (!vol->iocharset) {
@@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
                 * calling name ends in null (byte 16) from old smb
                 * convention.
                 */
-               if (server->workstation_RFC1001_name &&
-                   server->workstation_RFC1001_name[0] != 0)
+               if (server->workstation_RFC1001_name[0] != 0)
                        rfc1002mangle(ses_init_buf->trailer.
                                      session_req.calling_name,
                                      server->workstation_RFC1001_name,
@@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
 #endif /* CIFS_WEAK_PW_HASH */
                rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
                                        bcc_ptr, nls_codepage);
+               if (rc) {
+                       cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
+                                __func__, rc);
+                       cifs_buf_release(smb_buffer);
+                       return rc;
+               }
 
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
                if (ses->capabilities & CAP_UNICODE) {
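
The kfree() calls added before each kstrdup() in cifs_parse_mount_options() handle the case where the same option (username, domain, iocharset) appears more than once in the mount string: without the kfree(), the earlier duplicate is leaked. A generic sketch of that option-parsing idiom (the helper name is made up for the example):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Replace a previously parsed string option; kfree(NULL) is a no-op, so
 * this is safe both for the first occurrence and for repeats. */
static int set_string_option(char **slot, const char *value)
{
        kfree(*slot);
        *slot = kstrdup(value, GFP_KERNEL);
        return *slot ? 0 : -ENOMEM;
}
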
index a94b3e67318283dd54d61fc595ecb2037ba3a515..ca30c391a894a0e9df8eac6ed1ede194c89f1885 100644 (file)
@@ -1823,6 +1823,7 @@ refind_writable:
                        cifsFileInfo_put(inv_file);
                        spin_lock(&cifs_file_list_lock);
                        ++refind;
+                       inv_file = NULL;
                        goto refind_writable;
                }
        }
index 2d4f37235ed0fa360782ae237c89fccccbf8b719..3e126d7bb2ea5bec97c9d6e02973a49886261580 100644 (file)
@@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                                cifs_buf_release(srchinf->ntwrk_buf_start);
                        }
                        kfree(srchinf);
+                       if (rc)
+                               goto cgii_exit;
        } else
                goto cgii_exit;
 
index 689f035915cf70f075d71fca5e281ec009c5420a..22dfdf17d06547f3d1b3abbc302bb03abf1b047b 100644 (file)
@@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
 
        /* return pointer to beginning of data area, ie offset from SMB start */
        if ((*off != 0) && (*len != 0))
-               return hdr->ProtocolId + *off;
+               return (char *)(&hdr->ProtocolId[0]) + *off;
        else
                return NULL;
 }
index 96b5d40a2ece611b27ed19668cc4b7b665605113..eab05e1aa587424863d6914eb351da9fdcf17437 100644 (file)
@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
 
                        /* No need to change MaxChunks since already set to 1 */
                        chunk_sizes_updated = true;
-               }
+               } else
+                       goto cchunk_out;
        }
 
 cchunk_out:
index 3417340bf89e677fe0c46bf98cf922dd39d29a3a..65cd7a84c8bc3206033a917fe9d98fc939cbe1af 100644 (file)
@@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct TCP_Server_Info *server;
-       struct cifs_ses *ses = tcon->ses;
+       struct cifs_ses *ses;
        struct kvec iov[2];
        int resp_buftype;
        int num_iovecs;
@@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (plen)
                *plen = 0;
 
+       if (tcon)
+               ses = tcon->ses;
+       else
+               return -EIO;
+
        if (ses && (ses->server))
                server = ses->server;
        else
@@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
 
        if ((rc != 0) && (rc != -EINVAL)) {
-               if (tcon)
-                       cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+               cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
                goto ioctl_exit;
        } else if (rc == -EINVAL) {
                if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
                    (opcode != FSCTL_SRV_COPYCHUNK)) {
-                       if (tcon)
-                               cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+                       cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
                        goto ioctl_exit;
                }
        }
@@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
 
-       if ((rc != 0) && tcon)
+       if (rc != 0)
                cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
 
        free_rsp_buf(resp_buftype, iov[0].iov_base);
@@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        struct kvec iov[2];
        int rc = 0;
        int len;
-       int resp_buftype;
+       int resp_buftype = CIFS_NO_BUFFER;
        unsigned char *bufptr;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
index afec6450450ff08e6be4c1cd7a05dbc293ef0a76..6b8e2f091f5b8fd71d63d545ea5fe9ae593d7688 100644 (file)
@@ -570,6 +570,7 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
 #define BNEPCONNDEL    _IOW('B', 201, int)
 #define BNEPGETCONNLIST        _IOR('B', 210, int)
 #define BNEPGETCONNINFO        _IOR('B', 211, int)
+#define BNEPGETSUPPFEAT        _IOR('B', 212, int)
 
 #define CMTPCONNADD    _IOW('C', 200, int)
 #define CMTPCONNDEL    _IOW('C', 201, int)
@@ -1247,6 +1248,7 @@ COMPATIBLE_IOCTL(BNEPCONNADD)
 COMPATIBLE_IOCTL(BNEPCONNDEL)
 COMPATIBLE_IOCTL(BNEPGETCONNLIST)
 COMPATIBLE_IOCTL(BNEPGETCONNINFO)
+COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
 COMPATIBLE_IOCTL(CMTPCONNADD)
 COMPATIBLE_IOCTL(CMTPCONNDEL)
 COMPATIBLE_IOCTL(CMTPGETCONNLIST)
index e907052eeadb69f683df3c7d8838106c6e36905b..32a8bbd7a9ad1121f9b100da08f80500736bcebf 100644 (file)
@@ -53,6 +53,18 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
+/*
+ * If an inode is constantly having its pages dirtied, but then the
+ * updates stop dirtytime_expire_interval seconds in the past, it's
+ * possible for the worst case time between when an inode has its
+ * timestamps updated and when they finally get written out to be two
+ * dirtytime_expire_intervals.  We set the default to 12 hours (in
+ * seconds), which means most of the time inodes will have their
+ * timestamps written to disk after 12 hours, but in the worst case a
+ * few inodes might not have their timestamps updated for 24 hours.
+ */
+unsigned int dirtytime_expire_interval = 12 * 60 * 60;
+
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @bdi: the device's backing_dev_info structure.
@@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
        if ((flags & EXPIRE_DIRTY_ATIME) == 0)
                older_than_this = work->older_than_this;
-       else if ((work->reason == WB_REASON_SYNC) == 0) {
-               expire_time = jiffies - (HZ * 86400);
+       else if (!work->for_sync) {
+               expire_time = jiffies - (dirtytime_expire_interval * HZ);
                older_than_this = &expire_time;
        }
        while (!list_empty(delaying_queue)) {
@@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                 */
                redirty_tail(inode, wb);
        } else if (inode->i_state & I_DIRTY_TIME) {
+               inode->dirtied_when = jiffies;
                list_move(&inode->i_wb_list, &wb->b_dirty_time);
        } else {
                /* The inode is clean. Remove from writeback lists. */
@@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        spin_lock(&inode->i_lock);
 
        dirty = inode->i_state & I_DIRTY;
-       if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) &&
-            (inode->i_state & I_DIRTY_TIME)) ||
-           (inode->i_state & I_DIRTY_TIME_EXPIRED)) {
-               dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
-               trace_writeback_lazytime(inode);
-       }
+       if (inode->i_state & I_DIRTY_TIME) {
+               if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
+                   unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
+                   unlikely(time_after(jiffies,
+                                       (inode->dirtied_time_when +
+                                        dirtytime_expire_interval * HZ)))) {
+                       dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
+                       trace_writeback_lazytime(inode);
+               }
+       } else
+               inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
        inode->i_state &= ~dirty;
 
        /*
@@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
        rcu_read_unlock();
 }
 
+/*
+ * Wake up bdi's periodically to make sure dirtytime inodes get
+ * written back periodically.  We deliberately do *not* check the
+ * b_dirtytime list in wb_has_dirty_io(), since this would cause the
+ * kernel to be constantly waking up once there are any dirtytime
+ * inodes on the system.  So instead we define a separate delayed work
+ * function which gets called much more rarely.  (By default, only
+ * once every 12 hours.)
+ *
+ * If there is any other write activity going on in the file system,
+ * this function won't be necessary.  But if the only thing that has
+ * happened on the file system is a dirtytime inode caused by an atime
+ * update, we need this infrastructure below to make sure that inode
+ * eventually gets pushed out to disk.
+ */
+static void wakeup_dirtytime_writeback(struct work_struct *w);
+static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
+
+static void wakeup_dirtytime_writeback(struct work_struct *w)
+{
+       struct backing_dev_info *bdi;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
+               if (list_empty(&bdi->wb.b_dirty_time))
+                       continue;
+               bdi_wakeup_thread(bdi);
+       }
+       rcu_read_unlock();
+       schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+}
+
+static int __init start_dirtytime_writeback(void)
+{
+       schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+       return 0;
+}
+__initcall(start_dirtytime_writeback);
+
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (ret == 0 && write)
+               mod_delayed_work(system_wq, &dirtytime_work, 0);
+       return ret;
+}
+
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                        }
 
                        inode->dirtied_when = jiffies;
-                       list_move(&inode->i_wb_list, dirtytime ?
-                                 &bdi->wb.b_dirty_time : &bdi->wb.b_dirty);
+                       if (dirtytime)
+                               inode->dirtied_time_when = jiffies;
+                       if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
+                               list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+                       else
+                               list_move(&inode->i_wb_list,
+                                         &bdi->wb.b_dirty_time);
                        spin_unlock(&bdi->wb.list_lock);
                        trace_writeback_dirty_inode_enqueue(inode);
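
wakeup_dirtytime_writeback() above uses a common kernel pattern: a delayed work item that re-arms itself at the end of each run, plus a handler (here the dirtytime interval sysctl handler) that calls mod_delayed_work(..., 0) so a new interval takes effect immediately rather than after the old one elapses. A stripped-down sketch of the same pattern, with illustrative names that are not part of this series:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static unsigned int poll_interval = 12 * 60 * 60;       /* seconds */

static void poll_worker(struct work_struct *w);
static DECLARE_DELAYED_WORK(poll_work, poll_worker);

static void poll_worker(struct work_struct *w)
{
        /* ... scan whatever needs periodic attention ... */
        schedule_delayed_work(&poll_work, poll_interval * HZ);  /* re-arm */
}

/* Called when poll_interval changes: run the worker now; it re-arms itself
 * with the new interval. */
static void poll_interval_changed(void)
{
        mod_delayed_work(system_wq, &poll_work, 0);
}
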
 
index 6e560d56094b2ccaf7091ec784a6a039af214f43..754fdf8c6356388f16bbdc285240394b350bd90e 100644 (file)
@@ -131,13 +131,16 @@ skip:
        hfs_bnode_write(node, entry, data_off + key_len, entry_len);
        hfs_bnode_dump(node);
 
-       if (new_node) {
-               /* update parent key if we inserted a key
-                * at the start of the first node
-                */
-               if (!rec && new_node != node)
-                       hfs_brec_update_parent(fd);
+       /*
+        * update parent key if we inserted a key
+        * at the start of the node and it is not the new node
+        */
+       if (!rec && new_node != node) {
+               hfs_bnode_read_key(node, fd->search_key, data_off + size);
+               hfs_brec_update_parent(fd);
+       }
 
+       if (new_node) {
                hfs_bnode_put(fd->bnode);
                if (!new_node->parent) {
                        hfs_btree_inc_height(tree);
@@ -168,9 +171,6 @@ skip:
                goto again;
        }
 
-       if (!rec)
-               hfs_brec_update_parent(fd);
-
        return 0;
 }
 
@@ -370,6 +370,8 @@ again:
        if (IS_ERR(parent))
                return PTR_ERR(parent);
        __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
+       if (fd->record < 0)
+               return -ENOENT;
        hfs_bnode_dump(parent);
        rec = fd->record;
 
index 528fedfda15e6432bd69b80c7bd8faceb7d1351d..40bc384728c0e07b637dccfa755fe49b5751bf86 100644 (file)
@@ -1388,9 +1388,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 {
        int error = 0;
-       struct file_lock *new_fl;
        struct file_lock_context *ctx = inode->i_flctx;
-       struct file_lock *fl;
+       struct file_lock *new_fl, *fl, *tmp;
        unsigned long break_time;
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
        LIST_HEAD(dispose);
@@ -1420,7 +1419,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
                        break_time++;   /* so that 0 means no break time */
        }
 
-       list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+       list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                if (!leases_conflict(fl, new_fl))
                        continue;
                if (want_write) {
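
The fs/locks.c hunk switches __break_lease() to list_for_each_entry_safe() because the loop body can move the current lease off ctx->flc_lease onto the dispose list; a plain list_for_each_entry() would then follow a next pointer that no longer belongs to the list being walked. A minimal sketch of the idiom, with a made-up item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head node;
        bool stale;
};

/* Deleting (or moving) entries while walking a list requires the _safe
 * iterator, which caches the next node before the body can unlink the
 * current one. */
static void drop_stale(struct list_head *head)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, head, node) {
                if (!it->stale)
                        continue;
                list_del(&it->node);
                kfree(it);
        }
}
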
index cdbc78c7254218c80213877e90c08ec107aea223..03d647bf195d78bb3d6611553c9ad3e6fa4385a2 100644 (file)
@@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
        seg->offset = iomap.offset;
        seg->length = iomap.length;
 
-       dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es);
+       dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
        return 0;
 
 out_error:
index 9da89fddab338fcebbeb51fc0ec71534e0defc5a..9aa2796da90d9169488a625d5f80e90010971ff4 100644 (file)
@@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
 
                p = xdr_decode_hyper(p, &bex.foff);
                if (bex.foff & (block_size - 1)) {
-                       dprintk("%s: unaligned offset %lld\n",
+                       dprintk("%s: unaligned offset 0x%llx\n",
                                __func__, bex.foff);
                        goto fail;
                }
                p = xdr_decode_hyper(p, &bex.len);
                if (bex.len & (block_size - 1)) {
-                       dprintk("%s: unaligned length %lld\n",
+                       dprintk("%s: unaligned length 0x%llx\n",
                                __func__, bex.foff);
                        goto fail;
                }
                p = xdr_decode_hyper(p, &bex.soff);
                if (bex.soff & (block_size - 1)) {
-                       dprintk("%s: unaligned disk offset %lld\n",
+                       dprintk("%s: unaligned disk offset 0x%llx\n",
                                __func__, bex.soff);
                        goto fail;
                }
index 1028a062954357c06005dc7b0d6f61a10ea6f418..6904213a436368e47628af85701a3beb68e0550b 100644 (file)
@@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
 {
        struct super_block *sb = exp->ex_path.mnt->mnt_sb;
 
-       if (exp->ex_flags & NFSEXP_NOPNFS)
+       if (!(exp->ex_flags & NFSEXP_PNFS))
                return;
 
        if (sb->s_export_op->get_uuid &&
@@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
                        list_move_tail(&lp->lo_perstate, reaplist);
                        return;
                }
-               end = seg->offset;
+               lo->offset = layout_end(seg);
        } else {
                /* retain the whole layout segment on a split. */
                if (layout_end(seg) < end) {
                        dprintk("%s: split not supported\n", __func__);
                        return;
                }
-
-               lo->offset = layout_end(seg);
+               end = seg->offset;
        }
 
        layout_update_len(lo, end);
@@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
 
        spin_lock(&clp->cl_lock);
        list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
+               if (ls->ls_layout_type != lrp->lr_layout_type)
+                       continue;
+
                if (lrp->lr_return_type == RETURN_FSID &&
                    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
                                   &cstate->current_fh.fh_handle))
@@ -587,6 +589,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
 
        rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
 
+       trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
+
        printk(KERN_WARNING
                "nfsd: client %s failed to respond to layout recall. "
                "  Fencing..\n", addr_str);
index d30bea8d0277ab3bfa45e7d66c38ed410026f585..92b9d97aff4f1adfe24ed607d11661196a500ffc 100644 (file)
@@ -1237,8 +1237,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
                nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp);
 
        gdp->gd_notify_types &= ops->notify_types;
-       exp_put(exp);
 out:
+       exp_put(exp);
        return nfserr;
 }
 
index d2f2c37dc2dbd2649399fe2ddad4032a5025d337..8ba1d888f1e624d672453bd1eea20c40e054f746 100644 (file)
@@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
        } else
                nfs4_free_openowner(&oo->oo_owner);
        spin_unlock(&clp->cl_lock);
-       return oo;
+       return ret;
 }
 
 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
@@ -5062,7 +5062,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
        } else
                nfs4_free_lockowner(&lo->lo_owner);
        spin_unlock(&clp->cl_lock);
-       return lo;
+       return ret;
 }
 
 static void
index df5e66caf100ca303005018932ccc8edadab4089..5fb7e78169a6b27a1a5a4d5e9ec354c9e32f6c43 100644 (file)
@@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
        p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
        p = xdr_decode_hyper(p, &lgp->lg_seg.length);
        p = xdr_decode_hyper(p, &lgp->lg_minlength);
-       nfsd4_decode_stateid(argp, &lgp->lg_sid);
+
+       status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
+       if (status)
+               return status;
+
        READ_BUF(4);
        lgp->lg_maxcount = be32_to_cpup(p++);
 
@@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
        p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
        p = xdr_decode_hyper(p, &lcp->lc_seg.length);
        lcp->lc_reclaim = be32_to_cpup(p++);
-       nfsd4_decode_stateid(argp, &lcp->lc_sid);
+
+       status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
+       if (status)
+               return status;
+
        READ_BUF(4);
        lcp->lc_newoffset = be32_to_cpup(p++);
        if (lcp->lc_newoffset) {
@@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
                READ_BUF(16);
                p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
                p = xdr_decode_hyper(p, &lrp->lr_seg.length);
-               nfsd4_decode_stateid(argp, &lrp->lr_sid);
+
+               status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
+               if (status)
+                       return status;
+
                READ_BUF(4);
                lrp->lrf_body_len = be32_to_cpup(p++);
                if (lrp->lrf_body_len > 0) {
@@ -4123,7 +4135,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
                return nfserr_resource;
        *p++ = cpu_to_be32(lrp->lrs_present);
        if (lrp->lrs_present)
-               nfsd4_encode_stateid(xdr, &lrp->lr_sid);
+               return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
        return nfs_ok;
 }
 #endif /* CONFIG_NFSD_PNFS */
index 83a9694ec485b0593e3de847464243640fcd7186..46ec934f5dee8c529020558bed4cd820956734f1 100644 (file)
@@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void)
 {
        unsigned int hashsize;
        unsigned int i;
+       int status = 0;
 
        max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&num_drc_entries, 0);
        hashsize = nfsd_hashsize(max_drc_entries);
        maskbits = ilog2(hashsize);
 
-       register_shrinker(&nfsd_reply_cache_shrinker);
+       status = register_shrinker(&nfsd_reply_cache_shrinker);
+       if (status)
+               return status;
+
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
        if (!drc_slab)
index 44057b45ed326d4274a4cdd008465e40e1db8667..e34f906647d39dce39985d1cfe836f42b689c556 100644 (file)
@@ -437,6 +437,8 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
 #ifdef CONFIG_BCMA_HOST_PCI
 extern void bcma_host_pci_up(struct bcma_bus *bus);
 extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+                                struct bcma_device *core, bool enable);
 #else
 static inline void bcma_host_pci_up(struct bcma_bus *bus)
 {
@@ -444,6 +446,13 @@ static inline void bcma_host_pci_up(struct bcma_bus *bus)
 static inline void bcma_host_pci_down(struct bcma_bus *bus)
 {
 }
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+                                       struct bcma_device *core, bool enable)
+{
+       if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+               return -ENOTSUPP;
+       return 0;
+}
 #endif
 
 extern bool bcma_core_is_enabled(struct bcma_device *core);
index 8e90004fdfd7b215074d7a4a97280d6405765db4..5ba6918ca20bc9d3bc30993c21b7ef10a6c42243 100644 (file)
@@ -238,9 +238,13 @@ struct bcma_drv_pci {
 #define pcicore_write16(pc, offset, val)       bcma_write16((pc)->core, offset, val)
 #define pcicore_write32(pc, offset, val)       bcma_write32((pc)->core, offset, val)
 
-extern int bcma_core_pci_irq_ctl(struct bcma_bus *bus,
-                                struct bcma_device *core, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_PCI
 extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
+#else
+static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+}
+#endif
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
index b4d71b5e1ff23a2d3f3ec5c481937edd571ead4d..f4131e8ead74965a73272949b3a9eae8fa08b5c7 100644 (file)
@@ -604,6 +604,7 @@ struct inode {
        struct mutex            i_mutex;
 
        unsigned long           dirtied_when;   /* jiffies of first dirtying */
+       unsigned long           dirtied_time_when;
 
        struct hlist_node       i_hash;
        struct list_head        i_wb_list;      /* backing dev IO list */
index 781974afff9f14e576a7912039a5fb68009cdb25..ffbc034c88104d251c3891f4513c12a62818c5dd 100644 (file)
 #define GICR_PROPBASER_WaWb            (5U << 7)
 #define GICR_PROPBASER_RaWaWt          (6U << 7)
 #define GICR_PROPBASER_RaWaWb          (7U << 7)
+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
 #define GICR_PROPBASER_IDBITS_MASK     (0x1f)
 
+#define GICR_PENDBASER_NonShareable    (0U << 10)
+#define GICR_PENDBASER_InnerShareable  (1U << 10)
+#define GICR_PENDBASER_OuterShareable  (2U << 10)
+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PENDBASER_nCnB            (0U << 7)
+#define GICR_PENDBASER_nC              (1U << 7)
+#define GICR_PENDBASER_RaWt            (2U << 7)
+#define GICR_PENDBASER_RaWb            (3U << 7)
+#define GICR_PENDBASER_WaWt            (4U << 7)
+#define GICR_PENDBASER_WaWb            (5U << 7)
+#define GICR_PENDBASER_RaWaWt          (6U << 7)
+#define GICR_PENDBASER_RaWaWb          (7U << 7)
+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+
 /*
  * Re-Distributor registers, offsets from SGI_base
  */
 #define GITS_CBASER_WaWb               (5UL << 59)
 #define GITS_CBASER_RaWaWt             (6UL << 59)
 #define GITS_CBASER_RaWaWb             (7UL << 59)
+#define GITS_CBASER_CACHEABILITY_MASK  (7UL << 59)
 #define GITS_CBASER_NonShareable       (0UL << 10)
 #define GITS_CBASER_InnerShareable     (1UL << 10)
 #define GITS_CBASER_OuterShareable     (2UL << 10)
 #define GITS_BASER_WaWb                        (5UL << 59)
 #define GITS_BASER_RaWaWt              (6UL << 59)
 #define GITS_BASER_RaWaWb              (7UL << 59)
+#define GITS_BASER_CACHEABILITY_MASK   (7UL << 59)
 #define GITS_BASER_TYPE_SHIFT          (56)
 #define GITS_BASER_TYPE(r)             (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
 #define GITS_BASER_ENTRY_SIZE_SHIFT    (48)
index 47cb09edec1a613821b734a7b4a0aa1def2ab0ae..348c6f47e4cc36681e55ff4800a4d2a4e052f954 100644 (file)
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
 }
 
 
-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
 {
-       a += JHASH_INITVAL;
-       b += JHASH_INITVAL;
+       a += initval;
+       b += initval;
        c += initval;
 
        __jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
        return c;
 }
 
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+       return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
 static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
 {
-       return jhash_3words(a, b, 0, initval);
+       return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
 }
 
 static inline u32 jhash_1word(u32 a, u32 initval)
 {
-       return jhash_3words(a, 0, 0, initval);
+       return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
 }
 
 #endif /* _LINUX_JHASH_H */
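A quick aside on the jhash rework above: folding JHASH_INITVAL plus the word count (n << 2) into the seed handed to __jhash_nwords() lines the fixed-width helpers up with what jhash2() computes over the same words, which appears to be the point of the change. A minimal sketch under that assumption; jhash_2words_matches_jhash2() is an invented name used only for illustration:

#include <linux/jhash.h>

static bool jhash_2words_matches_jhash2(u32 a, u32 b, u32 seed)
{
        u32 words[2] = { a, b };

        /* both sides now start from seed + JHASH_INITVAL + (2 << 2) */
        return jhash_2words(a, b, seed) == jhash2(words, 2, seed);
}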
index 7bf01d779b4532a5a8d168d0ff91fd08d5228e02..1ce79a7f1daa18868adfe14598c35206382f58a5 100644 (file)
@@ -4,5 +4,6 @@
 #include <linux/compiler.h>
 
 unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
 
 #endif /* _LCM_H */
index fc03efa64ffe58ae9aa81e77bf60e5d7b4acc6c7..6b08cc106c218dc06d60988663c4bcac3eaba5fe 100644 (file)
@@ -232,6 +232,7 @@ enum {
                                              * led */
        ATA_FLAG_NO_DIPM        = (1 << 23), /* host not happy with DIPM */
        ATA_FLAG_LOWTAG         = (1 << 24), /* host wants lowest available tag */
+       ATA_FLAG_SAS_HOST       = (1 << 25), /* SAS host */
 
        /* bits 24:31 of ap->flags are reserved for LLD specific flags */
 
index fb0390a1a498f29df135bbe7500c1dfcc00a27d7..ee7b1ce7a6f8f42280abcc3d1411f7bcdebb6ac2 100644 (file)
@@ -2999,6 +2999,9 @@ enum usb_irq_events {
 #define PALMAS_GPADC_TRIM15                                    0x0E
 #define PALMAS_GPADC_TRIM16                                    0x0F
 
+/* TPS659038 regen2_ctrl offset is different from palmas */
+#define TPS659038_REGEN2_CTRL                                  0x12
+
 /* TPS65917 Interrupt registers */
 
 /* Registers for function INTERRUPT */
index 7299e9548906ea4219ee0d6897a217a4d022b981..f62e7cf227c61c9510e96afda8482e8f5b7b606b 100644 (file)
@@ -68,6 +68,8 @@ enum {
        MLX4_CMD_UNMAP_ICM_AUX   = 0xffb,
        MLX4_CMD_SET_ICM_SIZE    = 0xffd,
        MLX4_CMD_ACCESS_REG      = 0x3b,
+       MLX4_CMD_ALLOCATE_VPP    = 0x80,
+       MLX4_CMD_SET_VPORT_QOS   = 0x81,
 
        /*master notify fw on finish for slave's flr*/
        MLX4_CMD_INFORM_FLR_DONE = 0x5b,
@@ -186,7 +188,14 @@ enum {
 };
 
 enum {
-       /* set port opcode modifiers */
+       /* Set port opcode modifiers */
+       MLX4_SET_PORT_IB_OPCODE         = 0x0,
+       MLX4_SET_PORT_ETH_OPCODE        = 0x1,
+       MLX4_SET_PORT_BEACON_OPCODE     = 0x4,
+};
+
+enum {
+       /* Set port Ethernet input modifiers */
        MLX4_SET_PORT_GENERAL   = 0x0,
        MLX4_SET_PORT_RQP_CALC  = 0x1,
        MLX4_SET_PORT_MAC_TABLE = 0x2,
@@ -294,6 +303,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
 u32 mlx4_comm_get_version(void);
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+                    int max_tx_rate);
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
index ab7ebec943b81d82d8fdb81609830d3cd16a1818..f9ce34bec45b1d615bb1b191bffd867a23dd559a 100644 (file)
@@ -49,8 +49,6 @@
 #define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
 
-#define MLX4_NUM_UP                    8
-#define MLX4_NUM_TC                    8
 #define MLX4_MAX_100M_UNITS_VAL                255     /*
                                                 * work around: can't set values
                                                 * greater then this value when
@@ -174,6 +172,7 @@ enum {
        MLX4_DEV_CAP_FLAG_VEP_UC_STEER  = 1LL << 41,
        MLX4_DEV_CAP_FLAG_VEP_MC_STEER  = 1LL << 42,
        MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48,
+       MLX4_DEV_CAP_FLAG_RSS_IP_FRAG   = 1LL << 52,
        MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
        MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
        MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
@@ -206,7 +205,11 @@ enum {
        MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21,
        MLX4_DEV_CAP_FLAG2_QCN                  = 1LL <<  22,
        MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT        = 1LL <<  23,
-       MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN         = 1LL <<  24
+       MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN         = 1LL <<  24,
+       MLX4_DEV_CAP_FLAG2_QOS_VPP              = 1LL <<  25,
+       MLX4_DEV_CAP_FLAG2_ETS_CFG              = 1LL <<  26,
+       MLX4_DEV_CAP_FLAG2_PORT_BEACON          = 1LL <<  27,
+       MLX4_DEV_CAP_FLAG2_IGNORE_FCS           = 1LL <<  28,
 };
 
 enum {
@@ -1001,6 +1004,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
        return dev->flags & MLX4_FLAG_SLAVE;
 }
 
+static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+       return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf, gfp_t gfp);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -1305,9 +1313,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
                          u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
                           u8 promisc);
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
-               u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
+                           u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
index 1023ebe035b70041ce118a68826c923cf7d443db..6fed539e54569c3f0701632a8a17e9bdf16a53d6 100644 (file)
@@ -209,7 +209,8 @@ struct mlx4_qp_context {
        __be16                  sq_wqe_counter;
        u32                     reserved3;
        __be16                  rate_limit_params;
-       __be16                  reserved4;
+       u8                      reserved4;
+       u8                      qos_vport;
        __be32                  param3;
        __be32                  nummmcpeers_basemkey;
        u8                      log_page_size;
@@ -231,6 +232,7 @@ struct mlx4_update_qp_context {
 enum {
        MLX4_UPD_QP_MASK_PM_STATE       = 32,
        MLX4_UPD_QP_MASK_VSD            = 33,
+       MLX4_UPD_QP_MASK_QOS_VPP        = 34,
        MLX4_UPD_QP_MASK_RATE_LIMIT     = 35,
 };
 
@@ -432,7 +434,8 @@ enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
        MLX4_UPDATE_QP_VSD              = 1 << 1,
        MLX4_UPDATE_QP_RATE_LIMIT       = 1 << 2,
-       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 3) - 1
+       MLX4_UPDATE_QP_QOS_VPORT        = 1 << 3,
+       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 4) - 1
 };
 
 enum mlx4_update_qp_params_flags {
@@ -441,6 +444,7 @@ enum mlx4_update_qp_params_flags {
 
 struct mlx4_update_qp_params {
        u8      smac_index;
+       u8      qos_vport;
        u32     flags;
        u16     rate_unit;
        u16     rate_val;
index 2826a4b6071ef2c0c06cf6cf6fbb63e90095c04d..68cd08f02c2f62ca1d5a036a9f476396c6fa6e9e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index f6b17ac601bda06533afb7bc4c7d80a692ff7bc4..2695ced222df23b56df42252fb7319c7d7d8b157 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -137,14 +137,15 @@ enum {
 
 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
                               void __iomem *uar_page,
-                              spinlock_t *doorbell_lock)
+                              spinlock_t *doorbell_lock,
+                              u32 cons_index)
 {
        __be32 doorbell[2];
        u32 sn;
        u32 ci;
 
        sn = cq->arm_sn & 3;
-       ci = cq->cons_index & 0xffffff;
+       ci = cons_index & 0xffffff;
 
        *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
 
index 4e5bd813bb9a7d9142866857ff3c4be73f956e6e..abf65c7904214b75f5326a7f576df3b8e2f0a9d8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 163a818411e71742c332417c0bdfb811669f663f..afc78a3f4462e3f2eb2e350abc010ae39a181e7f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 166d9315fe4b565bdd8487da66559f6cbfe19462..9a90e7523dc24d2f7f29467023c8845cbf50cff7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -232,6 +232,9 @@ struct mlx5_cmd_stats {
 };
 
 struct mlx5_cmd {
+       void           *cmd_alloc_buf;
+       dma_addr_t      alloc_dma;
+       int             alloc_size;
        void           *cmd_buf;
        dma_addr_t      dma;
        u16             cmdif_rev;
@@ -407,7 +410,7 @@ struct mlx5_core_srq {
 struct mlx5_eq_table {
        void __iomem           *update_ci;
        void __iomem           *update_arm_ci;
-       struct list_head       *comp_eq_head;
+       struct list_head        comp_eqs_list;
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
@@ -722,6 +725,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
@@ -777,14 +781,22 @@ enum {
        MAX_MR_CACHE_ENTRIES    = 16,
 };
 
+enum {
+       MLX5_INTERFACE_PROTOCOL_IB  = 0,
+       MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
 struct mlx5_interface {
        void *                  (*add)(struct mlx5_core_dev *dev);
        void                    (*remove)(struct mlx5_core_dev *dev, void *context);
        void                    (*event)(struct mlx5_core_dev *dev, void *context,
                                         enum mlx5_dev_event event, unsigned long param);
+       void *                  (*get_dev)(void *context);
+       int                     protocol;
        struct list_head        list;
 };
 
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 
index 5f48b8f592c51a9aece4baa2755af9757b9976fa..cb3ad17edd1f5959b0499b82899ec95ada191025 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 61f7a342d1bfd1cc4f102d1b75be3d8a977e28d0..310b5f7fd6ae52101665c9f3dd6e042b6a4ceb9e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index e1a363a336634f7506880c086a1c2e2f1689bb7e..f43ed054a3e0904c2b99a844fdd79964c633fe9a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 996807963716770486d9733d34879273c2b4d77b..83430f2ea757df9ad92b054b36db1306562f9e59 100644 (file)
@@ -33,6 +33,8 @@
 #define SDIO_DEVICE_ID_BROADCOM_43341          0xa94d
 #define SDIO_DEVICE_ID_BROADCOM_4335_4339      0x4335
 #define SDIO_DEVICE_ID_BROADCOM_43362          0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43430          0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_4345           0x4345
 #define SDIO_DEVICE_ID_BROADCOM_4354           0x4354
 
 #define SDIO_VENDOR_ID_INTEL                   0x0089
index 967bb4c8caf16aa88d0a2645a3b754fd1f08b1ab..bf6d9df34d7b24755220841d45fc01c648ef7c02 100644 (file)
@@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  *                               struct net_device *dev);
  *     Called when a packet needs to be transmitted.
- *     Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ *     Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
+ *     the queue before that can happen; it's for obsolete devices and weird
+ *     corner cases, but the stack really does a non-trivial amount
+ *     of useless work if you return NETDEV_TX_BUSY.
  *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *     Required can not be NULL.
  *
@@ -1030,6 +1033,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *                          int queue_index, u32 maxrate);
  *     Called when a user wants to set a max-rate limitation of specific
  *     TX queue.
+ * int (*ndo_get_iflink)(const struct net_device *dev);
+ *     Called to get the iflink value of this device.
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1191,6 +1196,7 @@ struct net_device_ops {
        int                     (*ndo_set_tx_maxrate)(struct net_device *dev,
                                                      int queue_index,
                                                      u32 maxrate);
+       int                     (*ndo_get_iflink)(const struct net_device *dev);
 };
 
 /**
@@ -1322,7 +1328,7 @@ enum netdev_priv_flags {
  *     @mpls_features: Mask of features inheritable by MPLS
  *
  *     @ifindex:       interface index
- *     @iflink:        unique device identifier
+ *     @group:         The group that the device belongs to
  *
  *     @stats:         Statistics struct, which was left as a legacy, use
  *                     rtnl_link_stats64 instead
@@ -1482,7 +1488,6 @@ enum netdev_priv_flags {
  *
  *     @qdisc_tx_busylock:     XXX: need comments on this one
  *
- *     @group:         The group, that the device belongs to
  *     @pm_qos_req:    Power Management QoS object
  *
  *     FIXME: cleanup struct net_device such that network protocol info
@@ -1535,7 +1540,7 @@ struct net_device {
        netdev_features_t       mpls_features;
 
        int                     ifindex;
-       int                     iflink;
+       int                     group;
 
        struct net_device_stats stats;
 
@@ -1738,7 +1743,6 @@ struct net_device {
 #endif
        struct phy_device *phydev;
        struct lock_class_key *qdisc_tx_busylock;
-       int group;
        struct pm_qos_request   pm_qos_req;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2149,6 +2153,7 @@ void __dev_remove_pack(struct packet_type *pt);
 void dev_add_offload(struct packet_offload *po);
 void dev_remove_offload(struct packet_offload *po);
 
+int dev_get_iflink(const struct net_device *dev);
 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
                                      unsigned short mask);
 struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2159,8 +2164,12 @@ int dev_open(struct net_device *dev);
 int dev_close(struct net_device *dev);
 int dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
-int dev_loopback_xmit(struct sk_buff *newskb);
-int dev_queue_xmit(struct sk_buff *skb);
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+       return dev_queue_xmit_sk(skb->sk, skb);
+}
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2176,6 +2185,12 @@ void netdev_freemem(struct net_device *dev);
 void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
+DECLARE_PER_CPU(int, xmit_recursion);
+static inline int dev_recursion_level(void)
+{
+       return this_cpu_read(xmit_recursion);
+}
+
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -2915,7 +2930,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
 
 int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
-int netif_receive_skb(struct sk_buff *skb);
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
+static inline int netif_receive_skb(struct sk_buff *skb)
+{
+       return netif_receive_skb_sk(skb->sk, skb);
+}
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
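The reworded ndo_start_xmit comment above boils down to a single driver-side pattern: hand the frame to hardware, stop the TX queue as soon as the ring cannot take another one, and keep returning NETDEV_TX_OK instead of bouncing packets back with NETDEV_TX_BUSY. A hedged sketch of that pattern; everything named foo_* is an invented placeholder, not part of this patch set:

#include <linux/netdevice.h>

struct foo_priv {
        unsigned int tx_free;           /* free slots left in the TX ring */
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* no real hardware here: just consume the skb to stand in for a TX ring */
        dev_kfree_skb_any(skb);
        priv->tx_free--;

        /* stop before the stack can hand us a frame we would have to refuse */
        if (!priv->tx_free)
                netif_stop_queue(dev);

        return NETDEV_TX_OK;
}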
index 2517ece988209a611b324a0bb8ade2b566eeb645..63560d0a8dfe2802ec826d87921a8848fe56e68b 100644 (file)
@@ -44,11 +44,39 @@ int netfilter_init(void);
 struct sk_buff;
 
 struct nf_hook_ops;
+
+struct sock;
+
+struct nf_hook_state {
+       unsigned int hook;
+       int thresh;
+       u_int8_t pf;
+       struct net_device *in;
+       struct net_device *out;
+       struct sock *sk;
+       int (*okfn)(struct sock *, struct sk_buff *);
+};
+
+static inline void nf_hook_state_init(struct nf_hook_state *p,
+                                     unsigned int hook,
+                                     int thresh, u_int8_t pf,
+                                     struct net_device *indev,
+                                     struct net_device *outdev,
+                                     struct sock *sk,
+                                     int (*okfn)(struct sock *, struct sk_buff *))
+{
+       p->hook = hook;
+       p->thresh = thresh;
+       p->pf = pf;
+       p->in = indev;
+       p->out = outdev;
+       p->sk = sk;
+       p->okfn = okfn;
+}
+
 typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
                               struct sk_buff *skb,
-                              const struct net_device *in,
-                              const struct net_device *out,
-                              int (*okfn)(struct sk_buff *));
+                              const struct nf_hook_state *state);
 
 struct nf_hook_ops {
        struct list_head list;
@@ -118,9 +146,7 @@ static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 }
 #endif
 
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
-                struct net_device *indev, struct net_device *outdev,
-                int (*okfn)(struct sk_buff *), int thresh);
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
  *     nf_hook_thresh - call a netfilter hook
@@ -130,21 +156,29 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
  *     value indicates the packet has been consumed by the hook.
  */
 static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+                                struct sock *sk,
                                 struct sk_buff *skb,
                                 struct net_device *indev,
                                 struct net_device *outdev,
-                                int (*okfn)(struct sk_buff *), int thresh)
+                                int (*okfn)(struct sock *, struct sk_buff *),
+                                int thresh)
 {
-       if (nf_hooks_active(pf, hook))
-               return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+       if (nf_hooks_active(pf, hook)) {
+               struct nf_hook_state state;
+
+               nf_hook_state_init(&state, hook, thresh, pf,
+                                  indev, outdev, sk, okfn);
+               return nf_hook_slow(skb, &state);
+       }
        return 1;
 }
 
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
-                         struct net_device *indev, struct net_device *outdev,
-                         int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+                         struct sk_buff *skb, struct net_device *indev,
+                         struct net_device *outdev,
+                         int (*okfn)(struct sock *, struct sk_buff *))
 {
-       return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN);
+       return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
 }
                    
 /* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -165,35 +199,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
 */
 
 static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb,
-              struct net_device *in, struct net_device *out,
-              int (*okfn)(struct sk_buff *), int thresh)
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+              struct sk_buff *skb, struct net_device *in,
+              struct net_device *out,
+              int (*okfn)(struct sock *, struct sk_buff *), int thresh)
 {
-       int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh);
+       int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
        if (ret == 1)
-               ret = okfn(skb);
+               ret = okfn(sk, skb);
        return ret;
 }
 
 static inline int
-NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
-            struct net_device *in, struct net_device *out,
-            int (*okfn)(struct sk_buff *), bool cond)
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+            struct sk_buff *skb, struct net_device *in, struct net_device *out,
+            int (*okfn)(struct sock *, struct sk_buff *), bool cond)
 {
        int ret;
 
        if (!cond ||
-           ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
-               ret = okfn(skb);
+           ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
+               ret = okfn(sk, skb);
        return ret;
 }
 
 static inline int
-NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
+NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
        struct net_device *in, struct net_device *out,
-       int (*okfn)(struct sk_buff *))
+       int (*okfn)(struct sock *, struct sk_buff *))
 {
-       return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN);
+       return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
 }
 
 /* Call setsockopt() */
@@ -293,19 +328,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 }
 
 #else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
-#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
+#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
+#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
 static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+                                struct sock *sk,
                                 struct sk_buff *skb,
                                 struct net_device *indev,
                                 struct net_device *outdev,
-                                int (*okfn)(struct sk_buff *), int thresh)
+                                int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
 {
-       return okfn(skb);
+       return okfn(sk, skb);
 }
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
-                         struct net_device *indev, struct net_device *outdev,
-                         int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+                         struct sk_buff *skb, struct net_device *indev,
+                         struct net_device *outdev,
+                         int (*okfn)(struct sock *, struct sk_buff *))
 {
        return 1;
 }
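The netfilter.h changes above collapse the long in/out/okfn parameter lists into a struct nf_hook_state and thread the socket through to the continuation. A hedged caller-side sketch under the new signatures; foo_finish() and foo_output() are invented names, while dev_queue_xmit_sk() is the wrapper introduced in the netdevice.h hunk earlier in this series:

#include <linux/netfilter.h>
#include <linux/netdevice.h>
#include <net/dst.h>

static int foo_finish(struct sock *sk, struct sk_buff *skb)
{
        /* okfn now receives the socket alongside the skb */
        return dev_queue_xmit_sk(sk, skb);
}

static int foo_output(struct sock *sk, struct sk_buff *skb)
{
        /* NF_HOOK() packs pf/hook/sk/devices into an nf_hook_state internally */
        return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
                       NULL, skb_dst(skb)->dev, foo_finish);
}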
index cfb7191e6efa55633dfa8eb52855e6248ccb3d4f..c22a7fb8d0df08155857d8ca01ce4c67bfa01988 100644 (file)
@@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net,
 extern void arpt_unregister_table(struct xt_table *table);
 extern unsigned int arpt_do_table(struct sk_buff *skb,
                                  unsigned int hook,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  struct xt_table *table);
 
 #ifdef CONFIG_COMPAT
index 2734977199cac6a587126202dff59827724af277..ab8f76dba6680f485943545407daeb117620b489 100644 (file)
@@ -2,7 +2,7 @@
 #define __LINUX_BRIDGE_NETFILTER_H
 
 #include <uapi/linux/netfilter_bridge.h>
-
+#include <linux/skbuff.h>
 
 enum nf_br_hook_priorities {
        NF_BR_PRI_FIRST = INT_MIN,
@@ -17,20 +17,17 @@ enum nf_br_hook_priorities {
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 
-#define BRNF_PKT_TYPE                  0x01
 #define BRNF_BRIDGED_DNAT              0x02
 #define BRNF_NF_BRIDGE_PREROUTING      0x08
-#define BRNF_8021Q                     0x10
-#define BRNF_PPPoE                     0x20
 
 static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
-       if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
+       if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
                return PPPOE_SES_HLEN;
        return 0;
 }
 
-int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
 {
@@ -40,6 +37,27 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
                skb_dst_drop(skb);
 }
 
+static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
+{
+       return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0;
+}
+
+static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
+{
+       return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0;
+}
+
+static inline struct net_device *
+nf_bridge_get_physindev(const struct sk_buff *skb)
+{
+       return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
+}
+
+static inline struct net_device *
+nf_bridge_get_physoutdev(const struct sk_buff *skb)
+{
+       return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
+}
 #else
 #define br_drop_fake_rtable(skb)               do { } while (0)
 #endif /* CONFIG_BRIDGE_NETFILTER */
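The bridge-netfilter hunk above drops several raw flag bits and instead lets outside code reach the bridged in/out devices through small accessors rather than poking skb->nf_bridge directly. A hedged usage sketch, assuming CONFIG_BRIDGE_NETFILTER is enabled; foo_log_physdevs() is a made-up name:

#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/printk.h>

static void foo_log_physdevs(const struct sk_buff *skb)
{
        struct net_device *in = nf_bridge_get_physindev(skb);
        struct net_device *out = nf_bridge_get_physoutdev(skb);

        /* both accessors return NULL when the skb never crossed a bridge */
        pr_debug("bridged via %s -> %s\n",
                 in ? in->name : "(none)",
                 out ? out->name : "(none)");
}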
index 901e84db847d35e5ccbe49d2d524c921dc9bdef9..4073510da485fbad83ba5236d7ba5794b599653a 100644 (file)
@@ -65,8 +65,7 @@ struct ipt_error {
 extern void *ipt_alloc_initial_table(const struct xt_table *);
 extern unsigned int ipt_do_table(struct sk_buff *skb,
                                 unsigned int hook,
-                                const struct net_device *in,
-                                const struct net_device *out,
+                                const struct nf_hook_state *state,
                                 struct xt_table *table);
 
 #ifdef CONFIG_COMPAT
index 610208b18c05819dc4cfecc0554574269157f90f..b40d2b635778372f46870d00fd06a10266359f67 100644 (file)
@@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net,
 extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
 extern unsigned int ip6t_do_table(struct sk_buff *skb,
                                  unsigned int hook,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  struct xt_table *table);
 
 /* Check for an extension */
index 7e75bfe37cc7cd72d5db63cd094b5110984b066b..fe5732d53edacd17af2e259e191f60e595721345 100644 (file)
@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
                        int (*link_update)(struct net_device *,
                                           struct fixed_phy_status *));
+extern int fixed_phy_update_state(struct phy_device *phydev,
+                          const struct fixed_phy_status *status,
+                          const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
                                struct fixed_phy_status *status)
@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
 {
        return -ENODEV;
 }
+static inline int fixed_phy_update_state(struct phy_device *phydev,
+                          const struct fixed_phy_status *status,
+                          const struct fixed_phy_status *changed)
+{
+       return -ENODEV;
+}
 #endif /* CONFIG_FIXED_PHY */
 
 #endif /* __PHY_FIXED_H */
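fixed_phy_update_state(), declared above, gives MAC drivers a way to push link information into a fixed PHY at runtime; judging from the signature, the changed argument marks which fields of status should be applied, though that detail lives in the implementation rather than in this header hunk. A minimal hedged sketch; foo_report_link() is an invented name:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

static void foo_report_link(struct phy_device *phydev, bool up, int speed)
{
        struct fixed_phy_status status = { .link = up, .speed = speed };
        struct fixed_phy_status changed = { .link = 1, .speed = 1 };

        /* assumption: only the fields flagged in "changed" are taken from "status" */
        fixed_phy_update_state(phydev, &status, &changed);
}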
diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h
new file mode 100644 (file)
index 0000000..d6ed286
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Generic platform data for the NXP NCI NFC chips.
+ *
+ * Copyright (C) 2014  NXP Semiconductors  All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NXP_NCI_H_
+#define _NXP_NCI_H_
+
+struct nxp_nci_nfc_platform_data {
+       unsigned int gpio_en;
+       unsigned int gpio_fw;
+       unsigned int irq;
+};
+
+#endif /* _NXP_NCI_H_ */
index d4ad5b5a02bb478a422b349406efba00997bab76..045f709cb89b52c5e2381635b0bea10101030120 100644 (file)
@@ -316,7 +316,7 @@ struct regulator_desc {
  * @driver_data: private regulator data
  * @of_node: OpenFirmware node to parse for device tree bindings (may be
  *           NULL).
- * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
+ * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
  *          insufficient.
  * @ena_gpio_initialized: GPIO controlling regulator enable was properly
  *                        initialized, meaning that >= 0 is a valid gpio
index 6d77432e14ff971bffd4ca211dccb917768b2c8c..a419b65770d669c3a51c88a86a145abbcd3db339 100644 (file)
@@ -1625,11 +1625,11 @@ struct task_struct {
 
        /*
         * numa_faults_locality tracks if faults recorded during the last
-        * scan window were remote/local. The task scan period is adapted
-        * based on the locality of the faults with different weights
-        * depending on whether they were shared or private faults
+        * scan window were remote/local or failed to migrate. The task scan
+        * period is adapted based on the locality of the faults with different
+        * weights depending on whether they were shared or private faults
         */
-       unsigned long numa_faults_locality[2];
+       unsigned long numa_faults_locality[3];
 
        unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
@@ -1719,6 +1719,7 @@ struct task_struct {
 #define TNF_NO_GROUP   0x02
 #define TNF_SHARED     0x04
 #define TNF_FAULT_LOCAL        0x08
+#define TNF_MIGRATE_FAIL 0x10
 
 #ifdef CONFIG_NUMA_BALANCING
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
index 36f3f43c011789efe68c372296cc691cc7a3863e..0991259643d6ef44eac3940728a4c3fd1d51d7bb 100644 (file)
@@ -166,10 +166,16 @@ struct nf_conntrack {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
        atomic_t                use;
+       enum {
+               BRNF_PROTO_UNCHANGED,
+               BRNF_PROTO_8021Q,
+               BRNF_PROTO_PPPOE
+       } orig_proto;
+       bool                    pkt_otherhost;
        unsigned int            mask;
        struct net_device       *physindev;
        struct net_device       *physoutdev;
-       unsigned long           data[32 / sizeof(unsigned long)];
+       char                    neigh_header[8];
 };
 #endif
 
index c57d8ea0716cddea1419a37481dbd7704b230d65..59a7889e15db51c3b24bc771dbe5d0b1c965212d 100644 (file)
@@ -60,17 +60,17 @@ struct rpc_xprt;
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 void           rpc_register_sysctl(void);
 void           rpc_unregister_sysctl(void);
-int            sunrpc_debugfs_init(void);
+void           sunrpc_debugfs_init(void);
 void           sunrpc_debugfs_exit(void);
-int            rpc_clnt_debugfs_register(struct rpc_clnt *);
+void           rpc_clnt_debugfs_register(struct rpc_clnt *);
 void           rpc_clnt_debugfs_unregister(struct rpc_clnt *);
-int            rpc_xprt_debugfs_register(struct rpc_xprt *);
+void           rpc_xprt_debugfs_register(struct rpc_xprt *);
 void           rpc_xprt_debugfs_unregister(struct rpc_xprt *);
 #else
-static inline int
+static inline void
 sunrpc_debugfs_init(void)
 {
-       return 0;
+       return;
 }
 
 static inline void
@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void)
        return;
 }
 
-static inline int
+static inline void
 rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
 {
-       return 0;
+       return;
 }
 
 static inline void
@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
        return;
 }
 
-static inline int
+static inline void
 rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
 {
-       return 0;
+       return;
 }
 
 static inline void
index f869ae8afbaf9f4092625d7f5c0317ef7538ca1f..0caa3a2d4106eab0137d20ac75518af7964281ba 100644 (file)
@@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 struct tcp_fastopen_cookie {
        s8      len;
        u8      val[TCP_FASTOPEN_COOKIE_MAX];
+       bool    exp;    /* In RFC6994 experimental option format */
 };
 
 /* This defines a selective acknowledgement block. */
@@ -188,6 +189,7 @@ struct tcp_sock {
        u8      do_early_retrans:1,/* Enable RFC5827 early-retransmit  */
                syn_data:1,     /* SYN includes data */
                syn_fastopen:1, /* SYN includes Fast Open option */
+               syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
                syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
                is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
        u32     tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
index ff3fb2bd0e90899989791d266b9d34b47f30f12b..6e0ce8c7b8cb5a9fcb985a5a5078f82267d03092 100644 (file)
@@ -227,7 +227,7 @@ struct skb_data {   /* skb->cb is one of these */
        struct urb              *urb;
        struct usbnet           *dev;
        enum skb_state          state;
-       size_t                  length;
+       long                    length;
        unsigned long           packets;
 };
 
@@ -235,11 +235,13 @@ struct skb_data { /* skb->cb is one of these */
  * tx_fixup method before returning an skb.
  */
 static inline void
-usbnet_set_skb_tx_stats(struct sk_buff *skb, unsigned long packets)
+usbnet_set_skb_tx_stats(struct sk_buff *skb,
+                       unsigned long packets, long bytes_delta)
 {
        struct skb_data *entry = (struct skb_data *) skb->cb;
 
        entry->packets = packets;
+       entry->length = bytes_delta;
 }
 
 extern int usbnet_open(struct net_device *net);
index 00048339c23e4f252ee6a4b15cd38b49b8032de4..b2dd371ec0ca0aa6f0e1dc71d1f79c69c6dbdb2b 100644 (file)
@@ -130,6 +130,7 @@ extern int vm_dirty_ratio;
 extern unsigned long vm_dirty_bytes;
 extern unsigned int dirty_writeback_interval;
 extern unsigned int dirty_expire_interval;
+extern unsigned int dirtytime_expire_interval;
 extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
@@ -146,6 +147,8 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write,
 extern int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos);
 
 struct ctl_table;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
index 33a5e00025aaec4c32cc0bea5ae509bb84183741..7dba80546f16a00892c5cf8e5e3b7ca1eb292e95 100644 (file)
@@ -269,11 +269,23 @@ struct l2cap_ctrl {
        __u16   reqseq;
        __u16   txseq;
        __u8    retries;
+       __le16  psm;
+       bdaddr_t bdaddr;
+       struct l2cap_chan *chan;
 };
 
 struct hci_dev;
 
 typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+                                      u16 opcode, struct sk_buff *skb);
+
+struct req_ctrl {
+       bool start;
+       u8 event;
+       hci_req_complete_t complete;
+       hci_req_complete_skb_t complete_skb;
+};
 
 struct bt_skb_cb {
        __u8 pkt_type;
@@ -281,13 +293,10 @@ struct bt_skb_cb {
        __u16 opcode;
        __u16 expect;
        __u8 incoming:1;
-       __u8 req_start:1;
-       u8 req_event;
-       hci_req_complete_t req_complete;
-       struct l2cap_chan *chan;
-       struct l2cap_ctrl control;
-       bdaddr_t bdaddr;
-       __le16 psm;
+       union {
+               struct l2cap_ctrl l2cap;
+               struct req_ctrl req;
+       };
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
 
index 3acecf35420b707a40ab18fe0fa3d221025aa07f..d95da83cb1b0f1d63325891a0281e5bc184f7dd4 100644 (file)
@@ -374,6 +374,7 @@ enum {
 /* LE features */
 #define HCI_LE_ENCRYPTION              0x01
 #define HCI_LE_CONN_PARAM_REQ_PROC     0x02
+#define HCI_LE_SLAVE_FEATURES          0x08
 #define HCI_LE_PING                    0x10
 #define HCI_LE_DATA_LEN_EXT            0x20
 #define HCI_LE_EXT_SCAN_POLICY         0x80
@@ -463,12 +464,14 @@ enum {
 #define EIR_NAME_COMPLETE      0x09 /* complete local name */
 #define EIR_TX_POWER           0x0A /* transmit power level */
 #define EIR_CLASS_OF_DEV       0x0D /* Class of Device */
-#define EIR_SSP_HASH_C         0x0E /* Simple Pairing Hash C */
-#define EIR_SSP_RAND_R         0x0F /* Simple Pairing Randomizer R */
+#define EIR_SSP_HASH_C192      0x0E /* Simple Pairing Hash C-192 */
+#define EIR_SSP_RAND_R192      0x0F /* Simple Pairing Randomizer R-192 */
 #define EIR_DEVICE_ID          0x10 /* device ID */
 #define EIR_APPEARANCE         0x19 /* Device appearance */
 #define EIR_LE_BDADDR          0x1B /* LE Bluetooth device address */
 #define EIR_LE_ROLE            0x1C /* LE role */
+#define EIR_SSP_HASH_C256      0x1D /* Simple Pairing Hash C-256 */
+#define EIR_SSP_RAND_R256      0x1E /* Simple Pairing Rand R-256 */
 #define EIR_LE_SC_CONFIRM      0x22 /* LE SC Confirmation Value */
 #define EIR_LE_SC_RANDOM       0x23 /* LE SC Random Value */
 
@@ -1374,6 +1377,11 @@ struct hci_cp_le_conn_update {
        __le16   max_ce_len;
 } __packed;
 
+#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016
+struct hci_cp_le_read_remote_features {
+       __le16   handle;
+} __packed;
+
 #define HCI_OP_LE_START_ENC            0x2019
 struct hci_cp_le_start_enc {
        __le16  handle;
@@ -1866,6 +1874,13 @@ struct hci_ev_le_conn_update_complete {
        __le16   supervision_timeout;
 } __packed;
 
+#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04
+struct hci_ev_le_remote_feat_complete {
+       __u8     status;
+       __le16   handle;
+       __u8     features[8];
+} __packed;
+
 #define HCI_EV_LE_LTK_REQ              0x05
 struct hci_ev_le_ltk_req {
        __le16  handle;
index 540c07feece7fa4c19fcfa11e89d389b2087fd84..a056c2bfeb811465afc285699537741d42fdaaac 100644 (file)
@@ -185,7 +185,6 @@ struct amp_assoc {
 
 #define HCI_MAX_PAGES  3
 
-#define NUM_REASSEMBLY 4
 struct hci_dev {
        struct list_head list;
        struct mutex    lock;
@@ -326,14 +325,13 @@ struct hci_dev {
        struct sk_buff_head     raw_q;
        struct sk_buff_head     cmd_q;
 
-       struct sk_buff          *recv_evt;
        struct sk_buff          *sent_cmd;
-       struct sk_buff          *reassembly[NUM_REASSEMBLY];
 
        struct mutex            req_lock;
        wait_queue_head_t       req_wait_q;
        __u32                   req_status;
        __u32                   req_result;
+       struct sk_buff          *req_skb;
 
        void                    *smp_data;
        void                    *smp_bredr_data;
@@ -1012,7 +1010,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
 
 void hci_init_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1284,8 +1281,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
-bool hci_req_pending(struct hci_dev *hdev);
-
 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout);
 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1393,9 +1388,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status);
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
-                                      u8 *rand192, u8 *hash256, u8 *rand256,
-                                      u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
index 0f26aa707e62a13225031797c9c2edadbff05a3a..d0424269313fe2a84df0db71e07271e033756c9a 100644 (file)
@@ -18,11 +18,11 @@ struct dn_neigh {
 
 void dn_neigh_init(void);
 void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct sk_buff *skb);
+int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
 void dn_neigh_pointopoint_hello(struct sk_buff *skb);
 int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
-int dn_to_neigh_output(struct sk_buff *skb);
+int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
 
 extern struct neigh_table dn_neigh_table;
 
index d0808a3237630d7d94fd27d9dddff05f4b330408..d14af7edd197c2c364c1da62addc9781aa79fe84 100644 (file)
@@ -108,7 +108,8 @@ int ip_local_deliver(struct sk_buff *skb);
 int ip_mr_input(struct sk_buff *skb);
 int ip_output(struct sock *sk, struct sk_buff *skb);
 int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_fragment(struct sock *sk, struct sk_buff *skb,
+               int (*output)(struct sock *, struct sk_buff *));
 int ip_do_nat(struct sk_buff *skb);
 void ip_send_check(struct iphdr *ip);
 int __ip_local_out(struct sk_buff *skb);
@@ -455,22 +456,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
 
 #endif
 
-static inline int sk_mc_loop(struct sock *sk)
-{
-       if (!sk)
-               return 1;
-       switch (sk->sk_family) {
-       case AF_INET:
-               return inet_sk(sk)->mc_loop;
-#if IS_ENABLED(CONFIG_IPV6)
-       case AF_INET6:
-               return inet6_sk(sk)->mc_loop;
-#endif
-       }
-       WARN_ON(1);
-       return 1;
-}
-
 bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
index 1d09b46c1e489325b95f9987327d95ca8affed08..5e192068e6cb61a78d9b19b2b58bffd7c68b44bb 100644 (file)
@@ -170,11 +170,13 @@ static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
        return rt->rt6i_flags & RTF_ANYCAST;
 }
 
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip6_fragment(struct sock *sk, struct sk_buff *skb,
+                int (*output)(struct sock *, struct sk_buff *));
 
 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
-       struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+       struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+                               inet6_sk(skb->sk) : NULL;
 
        return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
index 76c091b53daef0c44105a99d843dba281fd08d31..b8529aa1dae7a0b601008afd18a8218c41ab10ff 100644 (file)
@@ -71,14 +71,16 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
                             const struct in6_addr *raddr);
 struct net *ip6_tnl_get_link_net(const struct net_device *dev);
+int ip6_tnl_get_iflink(const struct net_device *dev);
 
-static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+                                 struct net_device *dev)
 {
        struct net_device_stats *stats = &dev->stats;
        int pkt_len, err;
 
        pkt_len = skb->len;
-       err = ip6_local_out(skb);
+       err = ip6_local_out_sk(sk, skb);
 
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
index 2c47061a6954543abe462ece352e93cdd0538033..d8214cb88bbcfa6524a7d1900c543a45a05f7f31 100644 (file)
@@ -142,6 +142,7 @@ int ip_tunnel_init(struct net_device *dev);
 void ip_tunnel_uninit(struct net_device *dev);
 void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
 struct net *ip_tunnel_get_link_net(const struct net_device *dev);
+int ip_tunnel_get_iflink(const struct net_device *dev);
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname);
 
index 65142e6af44052b2807914c96cf48cf5882072df..eec8ad3c98432af6250a7a0c659d443da90ce45a 100644 (file)
@@ -47,8 +47,6 @@
 
 #define NEXTHDR_MAX            255
 
-
-
 #define IPV6_DEFAULT_HOPLIMIT   64
 #define IPV6_DEFAULT_MCASTHOPS 1
 
@@ -769,7 +767,7 @@ static inline u8 ip6_tclass(__be32 flowinfo)
 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);
 
-int ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb);
 
 /*
  *     upper-layer output functions
@@ -827,6 +825,7 @@ int ip6_input(struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
 
 int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
 int ip6_local_out(struct sk_buff *skb);
 
 /*
index fb4e8a3d6229b1205bd14644900d4640a3ac40fa..e18e7fd43f47d996613b0c1e7dfe6bc0e636c476 100644 (file)
@@ -213,7 +213,7 @@ struct ieee802154_ops {
        int             (*set_hw_addr_filt)(struct ieee802154_hw *hw,
                                            struct ieee802154_hw_addr_filt *filt,
                                            unsigned long changed);
-       int             (*set_txpower)(struct ieee802154_hw *hw, int db);
+       int             (*set_txpower)(struct ieee802154_hw *hw, s8 dbm);
        int             (*set_lbt)(struct ieee802154_hw *hw, bool on);
        int             (*set_cca_mode)(struct ieee802154_hw *hw,
                                        const struct wpan_phy_cca *cca);
@@ -247,7 +247,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
        __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
 }
 
-/* Basic interface to register ieee802154 hwice */
+/* Basic interface to register ieee802154 device */
 struct ieee802154_hw *
 ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
 void ieee802154_free_hw(struct ieee802154_hw *hw);
index 340c013795a49c9fcd91469ccb04a2be88f332fe..a3127325f624b9afd8cf160c63358e557f5cdf40 100644 (file)
@@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
                                  unsigned int hooknum);
 
 unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                            const struct net_device *in,
-                            const struct net_device *out,
+                            const struct nf_hook_state *state,
                             unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                      struct sk_buff *skb,
-                                                     const struct net_device *in,
-                                                     const struct net_device *out,
+                                                     const struct nf_hook_state *state,
                                                      struct nf_conn *ct));
 
 unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                                  struct sk_buff *skb,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                           struct sk_buff *skb,
-                                                          const struct net_device *in,
-                                                          const struct net_device *out,
+                                                          const struct nf_hook_state *state,
                                                           struct nf_conn *ct));
 
 unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
@@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
                                    unsigned int hooknum, unsigned int hdrlen);
 
 unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                            const struct net_device *in,
-                            const struct net_device *out,
+                            const struct nf_hook_state *state,
                             unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                      struct sk_buff *skb,
-                                                     const struct net_device *in,
-                                                     const struct net_device *out,
+                                                     const struct nf_hook_state *state,
                                                      struct nf_conn *ct));
 
 unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
                                  struct sk_buff *skb,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                           struct sk_buff *skb,
-                                                          const struct net_device *in,
-                                                          const struct net_device *out,
+                                                          const struct nf_hook_state *state,
                                                           struct nf_conn *ct));
 
 unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 #endif /* _NF_NAT_L3PROTO_H */
index 84a53d7803069dd15bb5bb8575dffa1a900d8ea5..d81d584157e11f5d80013de967ee341fa512baa0 100644 (file)
@@ -12,12 +12,8 @@ struct nf_queue_entry {
        unsigned int            id;
 
        struct nf_hook_ops      *elem;
-       u_int8_t                pf;
+       struct nf_hook_state    state;
        u16                     size; /* sizeof(entry) + saved route keys */
-       unsigned int            hook;
-       struct net_device       *indev;
-       struct net_device       *outdev;
-       int                     (*okfn)(struct sk_buff *);
 
        /* extra space to store route keys */
 };
index b8cd60dcb4e1f7fd1ae8b7efe7e5802a1ddd86ca..d6a2f0ed5130582d0489bc372e3b7e033f5267e8 100644 (file)
@@ -26,12 +26,11 @@ struct nft_pktinfo {
 static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
                                   const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
-                                  const struct net_device *in,
-                                  const struct net_device *out)
+                                  const struct nf_hook_state *state)
 {
        pkt->skb = skb;
-       pkt->in = pkt->xt.in = in;
-       pkt->out = pkt->xt.out = out;
+       pkt->in = pkt->xt.in = state->in;
+       pkt->out = pkt->xt.out = state->out;
        pkt->ops = ops;
        pkt->xt.hooknum = ops->hooknum;
        pkt->xt.family = ops->pf;
@@ -196,6 +195,7 @@ struct nft_set_estimate {
 };
 
 struct nft_set_ext;
+struct nft_expr;
 
 /**
  *     struct nft_set_ops - nf_tables set operations
@@ -218,6 +218,15 @@ struct nft_set_ops {
        bool                            (*lookup)(const struct nft_set *set,
                                                  const struct nft_data *key,
                                                  const struct nft_set_ext **ext);
+       bool                            (*update)(struct nft_set *set,
+                                                 const struct nft_data *key,
+                                                 void *(*new)(struct nft_set *,
+                                                              const struct nft_expr *,
+                                                              struct nft_data []),
+                                                 const struct nft_expr *expr,
+                                                 struct nft_data data[],
+                                                 const struct nft_set_ext **ext);
+
        int                             (*insert)(const struct nft_set *set,
                                                  const struct nft_set_elem *elem);
        void                            (*activate)(const struct nft_set *set,
@@ -258,6 +267,9 @@ void nft_unregister_set(struct nft_set_ops *ops);
  *     @dtype: data type (verdict or numeric type defined by userspace)
  *     @size: maximum set size
  *     @nelems: number of elements
+ *     @ndeact: number of deactivated elements queued for removal
+ *     @timeout: default timeout value in msecs
+ *     @gc_int: garbage collection interval in msecs
  *     @policy: set parameterization (see enum nft_set_policies)
  *     @ops: set ops
  *     @pnet: network namespace
@@ -273,7 +285,10 @@ struct nft_set {
        u32                             ktype;
        u32                             dtype;
        u32                             size;
-       u32                             nelems;
+       atomic_t                        nelems;
+       u32                             ndeact;
+       u64                             timeout;
+       u32                             gc_int;
        u16                             policy;
        /* runtime data below here */
        const struct nft_set_ops        *ops ____cacheline_aligned;
@@ -290,16 +305,27 @@ static inline void *nft_set_priv(const struct nft_set *set)
        return (void *)set->data;
 }
 
+static inline struct nft_set *nft_set_container_of(const void *priv)
+{
+       return (void *)priv - offsetof(struct nft_set, data);
+}
+
 struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
                                     const struct nlattr *nla);
 struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
                                          const struct nlattr *nla);
 
+static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+{
+       return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
+}
+
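The gc_int value comes from userspace in milliseconds; the helper above converts it to jiffies and falls back to one second when no interval was configured. As a rough sketch of how a set backend might use it (the example_set_priv structure and example_set_gc worker are made-up names, and the backend walk is elided), the GC worker simply re-arms itself with the per-set interval:

/* illustrative only: backend private data embedded at set->data */
struct example_set_priv {
	struct delayed_work	gc_work;
	/* ... backend lookup structure ... */
};

static void example_set_gc(struct work_struct *work)
{
	struct example_set_priv *priv =
		container_of(work, struct example_set_priv, gc_work.work);
	struct nft_set *set = nft_set_container_of(priv);

	/* walk the backend and queue expired elements for removal
	 * (see the GC batch helpers further down) */

	/* re-arm with the per-set interval, defaulting to HZ */
	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}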
 /**
  *     struct nft_set_binding - nf_tables set binding
  *
  *     @list: set bindings list node
  *     @chain: chain containing the rule bound to the set
+ *     @flags: set action flags
  *
  *     A set binding contains all information necessary for validation
  *     of new elements added to a bound set.
@@ -307,6 +333,7 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
 struct nft_set_binding {
        struct list_head                list;
        const struct nft_chain          *chain;
+       u32                             flags;
 };
 
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -320,12 +347,18 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
  *     @NFT_SET_EXT_KEY: element key
  *     @NFT_SET_EXT_DATA: mapping data
  *     @NFT_SET_EXT_FLAGS: element flags
+ *     @NFT_SET_EXT_TIMEOUT: element timeout
+ *     @NFT_SET_EXT_EXPIRATION: element expiration time
+ *     @NFT_SET_EXT_USERDATA: user data associated with the element
  *     @NFT_SET_EXT_NUM: number of extension types
  */
 enum nft_set_extensions {
        NFT_SET_EXT_KEY,
        NFT_SET_EXT_DATA,
        NFT_SET_EXT_FLAGS,
+       NFT_SET_EXT_TIMEOUT,
+       NFT_SET_EXT_EXPIRATION,
+       NFT_SET_EXT_USERDATA,
        NFT_SET_EXT_NUM
 };
 
@@ -422,14 +455,96 @@ static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
        return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
 }
 
+static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
+{
+       return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
+}
+
+static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext)
+{
+       return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+}
+
+static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
+{
+       return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+       return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
+              time_is_before_eq_jiffies(*nft_set_ext_expiration(ext));
+}
+
 static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
                                                   void *elem)
 {
        return elem + set->ops->elemsize;
 }
 
+void *nft_set_elem_init(const struct nft_set *set,
+                       const struct nft_set_ext_tmpl *tmpl,
+                       const struct nft_data *key,
+                       const struct nft_data *data,
+                       u64 timeout, gfp_t gfp);
 void nft_set_elem_destroy(const struct nft_set *set, void *elem);
 
+/**
+ *     struct nft_set_gc_batch_head - nf_tables set garbage collection batch
+ *
+ *     @rcu: rcu head
+ *     @set: set the elements belong to
+ *     @cnt: count of elements
+ */
+struct nft_set_gc_batch_head {
+       struct rcu_head                 rcu;
+       const struct nft_set            *set;
+       unsigned int                    cnt;
+};
+
+#define NFT_SET_GC_BATCH_SIZE  ((PAGE_SIZE -                             \
+                                 sizeof(struct nft_set_gc_batch_head)) / \
+                                sizeof(void *))
+
+/**
+ *     struct nft_set_gc_batch - nf_tables set garbage collection batch
+ *
+ *     @head: GC batch head
+ *     @elems: garbage collection elements
+ */
+struct nft_set_gc_batch {
+       struct nft_set_gc_batch_head    head;
+       void                            *elems[NFT_SET_GC_BATCH_SIZE];
+};
+
+struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+                                               gfp_t gfp);
+void nft_set_gc_batch_release(struct rcu_head *rcu);
+
+static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
+{
+       if (gcb != NULL)
+               call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
+}
+
+static inline struct nft_set_gc_batch *
+nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
+                      gfp_t gfp)
+{
+       if (gcb != NULL) {
+               if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
+                       return gcb;
+               nft_set_gc_batch_complete(gcb);
+       }
+       return nft_set_gc_batch_alloc(set, gfp);
+}
+
+static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
+                                       void *elem)
+{
+       gcb->elems[gcb->head.cnt++] = elem;
+}
+
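Taken together, the batch helpers are meant to be chained inside such a GC walk: nft_set_gc_batch_check() keeps filling the current page-sized batch and, once it is full, hands it to call_rcu() and allocates a fresh one, while nft_set_gc_batch_complete() flushes whatever is left at the end. A fragmentary sketch (the element iteration and the set/elem variables are assumed to come from the backend):

	struct nft_set_gc_batch *gcb = NULL;
	struct nft_set_ext *ext;

	/* for each element 'elem' found while walking the backend: */
	ext = nft_set_elem_ext(set, elem);
	if (nft_set_elem_expired(ext)) {
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb)
			nft_set_gc_batch_add(gcb, elem);
	}

	/* after the walk: whatever was queued is destroyed once an RCU
	 * grace period has elapsed */
	nft_set_gc_batch_complete(gcb);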
 /**
  *     struct nft_expr_type - nf_tables expression type
  *
@@ -751,6 +866,8 @@ static inline u8 nft_genmask_cur(const struct net *net)
        return 1 << ACCESS_ONCE(net->nft.gencursor);
 }
 
+#define NFT_GENMASK_ANY                ((1 << 0) | (1 << 1))
+
 /*
  * Set element transaction helpers
  */
@@ -767,6 +884,41 @@ static inline void nft_set_elem_change_active(const struct nft_set *set,
        ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet));
 }
 
+/*
+ * We use a free bit in the genmask field to indicate the element
+ * is busy, meaning it is currently being processed either by
+ * the netlink API or GC.
+ *
+ * Even though the genmask is only a single byte wide, this works
+ * because the extension structure is fully constant once initialized,
+ * so there are no non-atomic write accesses unless it is already
+ * marked busy.
+ */
+#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT  2
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT  (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#else
+#error
+#endif
+
+static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+{
+       unsigned long *word = (unsigned long *)ext;
+
+       BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+       return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
+static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+{
+       unsigned long *word = (unsigned long *)ext;
+
+       clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
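Since bit 2 of the genmask byte is otherwise unused, test_and_set_bit() on it acts as a cheap per-element trylock shared between the netlink path and GC. The intended claim/release pattern looks roughly like this (ext and set are assumed to be in scope; what happens while the element is busy is backend specific):

	if (nft_set_elem_mark_busy(ext))
		return;			/* netlink or GC already owns it */

	/* exclusive access: e.g. flip the element inactive and queue it
	 * for removal via the GC batch helpers above */
	nft_set_elem_change_active(set, ext);

	/* clear only if the element stays alive after processing */
	nft_set_elem_clear_busy(ext);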
 /**
  *     struct nft_trans - nf_tables object update in transaction
  *
index a75fc8e27cd698483232a3f3a379efc76308a905..c6f400cfaac8d76673d559bf041c5da4b32abf13 100644 (file)
@@ -31,6 +31,9 @@ void nft_cmp_module_exit(void);
 int nft_lookup_module_init(void);
 void nft_lookup_module_exit(void);
 
+int nft_dynset_module_init(void);
+void nft_dynset_module_exit(void);
+
 int nft_bitwise_module_init(void);
 void nft_bitwise_module_exit(void);
 
index cba143fbd2e4fca9f5d0280dda035215ebd922fb..2df7f96902ee96edaa9a85f7c96c254220b5e12d 100644 (file)
@@ -8,12 +8,11 @@ static inline void
 nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
                     const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out)
+                    const struct nf_hook_state *state)
 {
        struct iphdr *ip;
 
-       nft_set_pktinfo(pkt, ops, skb, in, out);
+       nft_set_pktinfo(pkt, ops, skb, state);
 
        ip = ip_hdr(pkt->skb);
        pkt->tprot = ip->protocol;
index 74d97613765801512e523bab6e40b6e2767d982d..97db2e3a5e657c4b67fdbcfd9b609aea390e1ec0 100644 (file)
@@ -8,13 +8,12 @@ static inline int
 nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
                     const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out)
+                    const struct nf_hook_state *state)
 {
        int protohdr, thoff = 0;
        unsigned short frag_off;
 
-       nft_set_pktinfo(pkt, ops, skb, in, out);
+       nft_set_pktinfo(pkt, ops, skb, state);
 
        protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
        /* If malformed, drop it */
index ab672b537dd4618dc25c31d1e10f400c96284fd2..020a814bc8ed7145f7b7491c6c21ba07b01e9c36 100644 (file)
@@ -83,6 +83,10 @@ struct nfc_hci_pipe {
 };
 
 #define NFC_HCI_MAX_CUSTOM_GATES       50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
 #define NFC_HCI_MAX_PIPES              127
 struct nfc_hci_init_data {
        u8 gate_count;
index ff87f8611fa3246d4570c9c780d29f8ee4ff0f6c..d4dcc7199fd766aeddd2cfe51a8411d6a873581f 100644 (file)
@@ -71,6 +71,7 @@ struct nci_ops {
        int   (*close)(struct nci_dev *ndev);
        int   (*send)(struct nci_dev *ndev, struct sk_buff *skb);
        int   (*setup)(struct nci_dev *ndev);
+       int   (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
        __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
        int   (*discover_se)(struct nci_dev *ndev);
        int   (*disable_se)(struct nci_dev *ndev, u32 se_idx);
@@ -137,6 +138,10 @@ struct nci_conn_info {
 #define NCI_HCI_INVALID_HOST               0x80
 
 #define NCI_HCI_MAX_CUSTOM_GATES   50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
 #define NCI_HCI_MAX_PIPES          127
 
 struct nci_hci_gate {
index 73190e65d5c13aa7217cce910203f4bd5f63d385..7ac029c0754678d900bb1cb78dc20fee6ba859c1 100644 (file)
@@ -157,7 +157,7 @@ struct nfc_evt_transaction {
        u32 aid_len;
        u8 aid[NFC_MAX_AID_LENGTH];
        u8 params_len;
-       u8 params[NFC_MAX_PARAMS_LENGTH];
+       u8 params[0];
 } __packed;
 
 struct nfc_genl_data {
index 6c6d5393fc349b46c384aa033554e4d12682040d..343d922d15c2ce0ce1a53dd55d7c006a42e15c99 100644 (file)
@@ -137,7 +137,7 @@ void rtnl_af_register(struct rtnl_af_ops *ops);
 void rtnl_af_unregister(struct rtnl_af_ops *ops);
 
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-struct net_device *rtnl_create_link(struct net *net, char *ifname,
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
                                    unsigned char name_assign_type,
                                    const struct rtnl_link_ops *ops,
                                    struct nlattr *tb[]);
index 3f9b8ce569481d758b5b2262c8baa2bc03462ca5..bd6f523f2251a9efe255bb11c3d995df2b6f8db2 100644 (file)
@@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
+bool sk_mc_loop(struct sock *sk);
+
 static inline bool sk_can_gso(const struct sock *sk)
 {
        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
index 963303fb96ae227263e648fb0c8dbafdc9cbc945..9598871485ce3d7d36f96be9643d94e7ca85cfe0 100644 (file)
@@ -179,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOPT_SACK             5       /* SACK Block */
 #define TCPOPT_TIMESTAMP       8       /* Better RTT estimations/PAWS */
 #define TCPOPT_MD5SIG          19      /* MD5 Signature (RFC2385) */
+#define TCPOPT_FASTOPEN                34      /* Fast open (RFC7413) */
 #define TCPOPT_EXP             254     /* Experimental */
 /* Magic number to be after the option value for sharing TCP
  * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -194,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOLEN_SACK_PERM      2
 #define TCPOLEN_TIMESTAMP      10
 #define TCPOLEN_MD5SIG         18
+#define TCPOLEN_FASTOPEN_BASE  2
 #define TCPOLEN_EXP_FASTOPEN_BASE  4
 
 /* But this is what stacks really send out. */
@@ -1337,7 +1339,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie, int *syn_loss,
                            unsigned long *last_syn_loss);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-                           struct tcp_fastopen_cookie *cookie, bool syn_lost);
+                           struct tcp_fastopen_cookie *cookie, bool syn_lost,
+                           u16 try_exp);
 struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie      cookie;
index 1a20d33d56bc1ffb6d91826282ddabfc53d4fb25..c491c1221606e0f2625de8b53cfd8fdcd29282c9 100644 (file)
@@ -77,13 +77,14 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
                           struct udp_tunnel_sock_cfg *sock_cfg);
 
 /* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                        __be32 src, __be32 dst, __u8 tos, __u8 ttl,
                        __be16 df, __be16 src_port, __be16 dst_port,
                        bool xnet, bool nocheck);
 
 #if IS_ENABLED(CONFIG_IPV6)
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+                        struct sk_buff *skb,
                         struct net_device *dev, struct in6_addr *saddr,
                         struct in6_addr *daddr,
                         __u8 prio, __u8 ttl, __be16 src_port,
index 756e4636bad8a3a8395013db990cf774aa49aa19..0082b5d33d7d3f2ea66fe94c26b8c3572188affc 100644 (file)
@@ -145,7 +145,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 
 void vxlan_sock_release(struct vxlan_sock *vs);
 
-int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
                   bool xnet, u32 vxflags);
index 461f8353949370f7342f83d23513d864ac0adc6f..36ac102c97c72b1b5d62f99e28bf285fbad9f8bb 100644 (file)
@@ -332,7 +332,7 @@ struct xfrm_state_afinfo {
        int                     (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
        int                     (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
        int                     (*output)(struct sock *sk, struct sk_buff *skb);
-       int                     (*output_finish)(struct sk_buff *skb);
+       int                     (*output_finish)(struct sock *sk, struct sk_buff *skb);
        int                     (*extract_input)(struct xfrm_state *x,
                                                 struct sk_buff *skb);
        int                     (*extract_output)(struct xfrm_state *x,
@@ -1503,7 +1503,7 @@ int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 int xfrm_output_resume(struct sk_buff *skb, int err);
-int xfrm_output(struct sk_buff *skb);
+int xfrm_output(struct sock *sk, struct sk_buff *skb);
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 void xfrm_local_error(struct sk_buff *skb, int mtu);
 int xfrm4_extract_header(struct sk_buff *skb);
@@ -1524,7 +1524,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm4_output(struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1549,7 +1549,7 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm6_output(struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
                          u8 **prevhdr);
 
index 23d561512f64fe65a5165a23d4620f7d457504a4..22317d2b52abcbe194ef8e9964bf179ae7d32846 100644 (file)
@@ -7,27 +7,26 @@
 #include <linux/ktime.h>
 #include <linux/tracepoint.h>
 
-struct device;
-struct regmap;
+#include "../../../drivers/base/regmap/internal.h"
 
 /*
  * Log register events
  */
 DECLARE_EVENT_CLASS(regmap_reg,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val),
+       TP_ARGS(map, reg, val),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        unsigned int,   reg             )
-               __field(        unsigned int,   val             )
+               __string(       name,           regmap_name(map)        )
+               __field(        unsigned int,   reg                     )
+               __field(        unsigned int,   val                     )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->reg = reg;
                __entry->val = val;
        ),
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
 
 DEFINE_EVENT(regmap_reg, regmap_reg_write,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val)
+       TP_ARGS(map, reg, val)
 
 );
 
 DEFINE_EVENT(regmap_reg, regmap_reg_read,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val)
+       TP_ARGS(map, reg, val)
 
 );
 
 DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
 
-       TP_PROTO(struct device *dev, unsigned int reg,
+       TP_PROTO(struct regmap *map, unsigned int reg,
                 unsigned int val),
 
-       TP_ARGS(dev, reg, val)
+       TP_ARGS(map, reg, val)
 
 );
 
 DECLARE_EVENT_CLASS(regmap_block,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count),
+       TP_ARGS(map, reg, count),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        unsigned int,   reg             )
-               __field(        int,            count           )
+               __string(       name,           regmap_name(map)        )
+               __field(        unsigned int,   reg                     )
+               __field(        int,            count                   )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->reg = reg;
                __entry->count = count;
        ),
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
 
 DEFINE_EVENT(regmap_block, regmap_hw_read_start,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_block, regmap_hw_read_done,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_block, regmap_hw_write_start,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_block, regmap_hw_write_done,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 TRACE_EVENT(regcache_sync,
 
-       TP_PROTO(struct device *dev, const char *type,
+       TP_PROTO(struct regmap *map, const char *type,
                 const char *status),
 
-       TP_ARGS(dev, type, status),
+       TP_ARGS(map, type, status),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __string(       status,         status          )
-               __string(       type,           type            )
-               __field(        int,            type            )
+               __string(       name,           regmap_name(map)        )
+               __string(       status,         status                  )
+               __string(       type,           type                    )
+               __field(        int,            type                    )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __assign_str(status, status);
                __assign_str(type, type);
        ),
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
 
 DECLARE_EVENT_CLASS(regmap_bool,
 
-       TP_PROTO(struct device *dev, bool flag),
+       TP_PROTO(struct regmap *map, bool flag),
 
-       TP_ARGS(dev, flag),
+       TP_ARGS(map, flag),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        int,            flag            )
+               __string(       name,           regmap_name(map)        )
+               __field(        int,            flag                    )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->flag = flag;
        ),
 
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
 
 DEFINE_EVENT(regmap_bool, regmap_cache_only,
 
-       TP_PROTO(struct device *dev, bool flag),
+       TP_PROTO(struct regmap *map, bool flag),
 
-       TP_ARGS(dev, flag)
+       TP_ARGS(map, flag)
 
 );
 
 DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
 
-       TP_PROTO(struct device *dev, bool flag),
+       TP_PROTO(struct regmap *map, bool flag),
 
-       TP_ARGS(dev, flag)
+       TP_ARGS(map, flag)
 
 );
 
 DECLARE_EVENT_CLASS(regmap_async,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev),
+       TP_ARGS(map),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
+               __string(       name,           regmap_name(map)        )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
        ),
 
        TP_printk("%s", __get_str(name))
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
 
 DEFINE_EVENT(regmap_block, regmap_async_write_start,
 
-       TP_PROTO(struct device *dev, unsigned int reg, int count),
+       TP_PROTO(struct regmap *map, unsigned int reg, int count),
 
-       TP_ARGS(dev, reg, count)
+       TP_ARGS(map, reg, count)
 );
 
 DEFINE_EVENT(regmap_async, regmap_async_io_complete,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev)
+       TP_ARGS(map)
 
 );
 
 DEFINE_EVENT(regmap_async, regmap_async_complete_start,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev)
+       TP_ARGS(map)
 
 );
 
 DEFINE_EVENT(regmap_async, regmap_async_complete_done,
 
-       TP_PROTO(struct device *dev),
+       TP_PROTO(struct regmap *map),
 
-       TP_ARGS(dev)
+       TP_ARGS(map)
 
 );
 
 TRACE_EVENT(regcache_drop_region,
 
-       TP_PROTO(struct device *dev, unsigned int from,
+       TP_PROTO(struct regmap *map, unsigned int from,
                 unsigned int to),
 
-       TP_ARGS(dev, from, to),
+       TP_ARGS(map, from, to),
 
        TP_STRUCT__entry(
-               __string(       name,           dev_name(dev)   )
-               __field(        unsigned int,   from            )
-               __field(        unsigned int,   to              )
+               __string(       name,           regmap_name(map)        )
+               __field(        unsigned int,   from                    )
+               __field(        unsigned int,   to                      )
        ),
 
        TP_fast_assign(
-               __assign_str(name, dev_name(dev));
+               __assign_str(name, regmap_name(map));
                __entry->from = from;
                __entry->to = to;
        ),
index 74aab6e0d96436b5d24e19c18e9c8d81dc14eee9..23df3e7f8e7d2eee7b196bff4c825d38ab284043 100644 (file)
@@ -168,7 +168,43 @@ enum bpf_func_id {
        BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
        BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
        BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
-       BPF_FUNC_skb_store_bytes, /* int skb_store_bytes(skb, offset, from, len) */
+
+       /**
+        * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
+        * @skb: pointer to skb
+        * @offset: offset within packet from skb->data
+        * @from: pointer where to copy bytes from
+        * @len: number of bytes to store into packet
+        * @flags: bit 0 - if true, recompute skb->csum
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_skb_store_bytes,
+
+       /**
+        * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
+        * @skb: pointer to skb
+        * @offset: offset within packet where IP checksum is located
+        * @from: old value of header field
+        * @to: new value of header field
+        * @flags: bits 0-3 - size of header field
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_l3_csum_replace,
+
+       /**
+        * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
+        * @skb: pointer to skb
+        * @offset: offset within packet where TCP/UDP checksum is located
+        * @from: old value of header field
+        * @to: new value of header field
+        * @flags: bits 0-3 - size of header field
+        *         bit 4 - is pseudo header
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_l4_csum_replace,
        __BPF_FUNC_MAX_ID,
 };
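Both csum_replace helpers apply the incremental update rule from RFC 1624 (HC' = ~(~HC + ~m + m')), which is why the caller only passes the old and new value of the rewritten field rather than the whole header. A small standalone illustration of that arithmetic (plain userspace C, not part of the patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* full ones' complement checksum over n 16-bit words */
static uint16_t csum16(const uint16_t *p, size_t n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *p++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum;
}

/* incremental update per RFC 1624, eqn. 3 */
static uint16_t csum16_replace(uint16_t csum, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~csum + (uint16_t)~old + new;

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum;
}

int main(void)
{
	uint16_t hdr[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
	uint16_t csum = csum16(hdr, 4);

	hdr[2] = 0x5555;	/* rewrite one 16-bit header field */
	assert(csum16_replace(csum, 0x3333, 0x5555) == csum16(hdr, 4));
	return 0;
}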
 
@@ -184,6 +220,7 @@ struct __sk_buff {
        __u32 vlan_present;
        __u32 vlan_tci;
        __u32 vlan_proto;
+       __u32 priority;
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
index 78ec76fd89a6ce4fe70161576ec0c01e5d6156d3..8735f1080385d045fff82660fd53c3a79dd98fc8 100644 (file)
@@ -57,6 +57,7 @@ enum {
        CAN_RAW_LOOPBACK,       /* local loopback (default:on)       */
        CAN_RAW_RECV_OWN_MSGS,  /* receive my own msgs (default:off) */
        CAN_RAW_FD_FRAMES,      /* allow CAN FD frames (default:off) */
+       CAN_RAW_JOIN_FILTERS,   /* all filters must match to trigger */
 };
 
 #endif /* !_UAPI_CAN_RAW_H */
index b0a81307985282005ade90ee6da96e3048c405d3..2f62ab2d7bf99bd75b16eef6db74ce8aa233e8ff 100644 (file)
@@ -973,7 +973,8 @@ struct input_keymap_entry {
  */
 #define MT_TOOL_FINGER         0
 #define MT_TOOL_PEN            1
-#define MT_TOOL_MAX            1
+#define MT_TOOL_PALM           2
+#define MT_TOOL_MAX            2
 
 /*
  * Values describing the status of a force-feedback effect
index b9783931503b1f704a2422d85922301929dc2675..05ee1e0804a3f2c8971c1956dc768c416e4cec71 100644 (file)
@@ -208,12 +208,14 @@ enum nft_rule_compat_attributes {
  * @NFT_SET_CONSTANT: set contents may not change while bound
  * @NFT_SET_INTERVAL: set contains intervals
  * @NFT_SET_MAP: set is used as a dictionary
+ * @NFT_SET_TIMEOUT: set uses timeouts
  */
 enum nft_set_flags {
        NFT_SET_ANONYMOUS               = 0x1,
        NFT_SET_CONSTANT                = 0x2,
        NFT_SET_INTERVAL                = 0x4,
        NFT_SET_MAP                     = 0x8,
+       NFT_SET_TIMEOUT                 = 0x10,
 };
 
 /**
@@ -252,6 +254,8 @@ enum nft_set_desc_attributes {
  * @NFTA_SET_POLICY: selection policy (NLA_U32)
  * @NFTA_SET_DESC: set description (NLA_NESTED)
  * @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
+ * @NFTA_SET_TIMEOUT: default timeout value (NLA_U64)
+ * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
  */
 enum nft_set_attributes {
        NFTA_SET_UNSPEC,
@@ -265,6 +269,8 @@ enum nft_set_attributes {
        NFTA_SET_POLICY,
        NFTA_SET_DESC,
        NFTA_SET_ID,
+       NFTA_SET_TIMEOUT,
+       NFTA_SET_GC_INTERVAL,
        __NFTA_SET_MAX
 };
 #define NFTA_SET_MAX           (__NFTA_SET_MAX - 1)
@@ -284,12 +290,18 @@ enum nft_set_elem_flags {
  * @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
  * @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
  * @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ * @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64)
+ * @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
+ * @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
  */
 enum nft_set_elem_attributes {
        NFTA_SET_ELEM_UNSPEC,
        NFTA_SET_ELEM_KEY,
        NFTA_SET_ELEM_DATA,
        NFTA_SET_ELEM_FLAGS,
+       NFTA_SET_ELEM_TIMEOUT,
+       NFTA_SET_ELEM_EXPIRATION,
+       NFTA_SET_ELEM_USERDATA,
        __NFTA_SET_ELEM_MAX
 };
 #define NFTA_SET_ELEM_MAX      (__NFTA_SET_ELEM_MAX - 1)
@@ -505,6 +517,33 @@ enum nft_lookup_attributes {
 };
 #define NFTA_LOOKUP_MAX                (__NFTA_LOOKUP_MAX - 1)
 
+enum nft_dynset_ops {
+       NFT_DYNSET_OP_ADD,
+       NFT_DYNSET_OP_UPDATE,
+};
+
+/**
+ * enum nft_dynset_attributes - dynset expression attributes
+ *
+ * @NFTA_DYNSET_SET_NAME: name of the set to add data to (NLA_STRING)
+ * @NFTA_DYNSET_SET_ID: unique identifier of the set in the transaction (NLA_U32)
+ * @NFTA_DYNSET_OP: operation (NLA_U32)
+ * @NFTA_DYNSET_SREG_KEY: source register of the key (NLA_U32)
+ * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
+ * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
+ */
+enum nft_dynset_attributes {
+       NFTA_DYNSET_UNSPEC,
+       NFTA_DYNSET_SET_NAME,
+       NFTA_DYNSET_SET_ID,
+       NFTA_DYNSET_OP,
+       NFTA_DYNSET_SREG_KEY,
+       NFTA_DYNSET_SREG_DATA,
+       NFTA_DYNSET_TIMEOUT,
+       __NFTA_DYNSET_MAX,
+};
+#define NFTA_DYNSET_MAX                (__NFTA_DYNSET_MAX - 1)
+
 /**
  * enum nft_payload_bases - nf_tables payload expression offset bases
  *
index 4742f2cb42f2bd46180421efef423f101579bd03..d3bd6ffec04101138e831076789fc11cee98f2be 100644 (file)
@@ -47,7 +47,7 @@
  * exported filesystem.
  */
 #define        NFSEXP_V4ROOT           0x10000
-#define NFSEXP_NOPNFS          0x20000
+#define NFSEXP_PNFS            0x20000
 
 /* All flags that we claim to support.  (Note we don't support NOACL.) */
 #define NFSEXP_ALLFLAGS                0x3FE7F
index bea910f924dde6b125568e83e737801e721778c0..974db03f7b1a2d9ddf96d0b34a409f8356e94d1a 100644 (file)
@@ -134,6 +134,8 @@ enum {
 
        RTM_NEWNSID = 88,
 #define RTM_NEWNSID RTM_NEWNSID
+       RTM_DELNSID = 89,
+#define RTM_DELNSID RTM_DELNSID
        RTM_GETNSID = 90,
 #define RTM_GETNSID RTM_GETNSID
 
@@ -635,6 +637,8 @@ enum rtnetlink_groups {
 #define RTNLGRP_MDB            RTNLGRP_MDB
        RTNLGRP_MPLS_ROUTE,
 #define RTNLGRP_MPLS_ROUTE     RTNLGRP_MPLS_ROUTE
+       RTNLGRP_NSID,
+#define RTNLGRP_NSID           RTNLGRP_NSID
        __RTNLGRP_MAX
 };
 #define RTNLGRP_MAX    (__RTNLGRP_MAX - 1)
index 453ef61311d4cf069669fb449270a0700ae0f2f9..2fabc062716591e960dbab2c9cfc16a755895773 100644 (file)
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry)
 {
        struct perf_event *event = container_of(entry,
                        struct perf_event, pending);
+       int rctx;
+
+       rctx = perf_swevent_get_recursion_context();
+       /*
+        * If we 'fail' here, that's OK, it means recursion is already disabled
+        * and we won't recurse 'further'.
+        */
 
        if (event->pending_disable) {
                event->pending_disable = 0;
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry)
                event->pending_wakeup = 0;
                perf_event_wakeup(event);
        }
+
+       if (rctx >= 0)
+               perf_swevent_put_recursion_context(rctx);
 }
 
 /*
index 88d0d4420ad2e3e47e71129d361b153c96a49e18..ba77ab5f64dd9809f5e24f1b079ac5abbefeb495 100644 (file)
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
        if (!new_class->name)
                return 0;
 
-       list_for_each_entry(class, &all_lock_classes, lock_entry) {
+       list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        hash_head = classhashentry(key);
 
        /*
-        * We can walk the hash lockfree, because the hash only
-        * grows, and we are careful when adding entries to the end:
+        * We do an RCU walk of the hash, see lockdep_free_key_range().
         */
-       list_for_each_entry(class, hash_head, hash_entry) {
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return NULL;
+
+       list_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key) {
                        /*
                         * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;
-       unsigned long flags;
+
+       DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
        class = look_up_lock_class(lock, subclass);
        if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        key = lock->key->subkeys + subclass;
        hash_head = classhashentry(key);
 
-       raw_local_irq_save(flags);
        if (!graph_lock()) {
-               raw_local_irq_restore(flags);
                return NULL;
        }
        /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
-       list_for_each_entry(class, hash_head, hash_entry)
+       list_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key)
                        goto out_unlock_set;
+       }
+
        /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                if (!debug_locks_off_graph_unlock()) {
-                       raw_local_irq_restore(flags);
                        return NULL;
                }
-               raw_local_irq_restore(flags);
 
                print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
                dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
        if (verbose(class)) {
                graph_unlock();
-               raw_local_irq_restore(flags);
 
                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
                printk("\n");
                dump_stack();
 
-               raw_local_irq_save(flags);
                if (!graph_lock()) {
-                       raw_local_irq_restore(flags);
                        return NULL;
                }
        }
 out_unlock_set:
        graph_unlock();
-       raw_local_irq_restore(flags);
 
 out_set_class_cache:
        if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
        entry->distance = distance;
        entry->trace = *trace;
        /*
-        * Since we never remove from the dependency list, the list can
-        * be walked lockless by other CPUs, it's only allocation
-        * that must be protected by the spinlock. But this also means
-        * we must make new entries visible only once writes to the
-        * entry become visible - hence the RCU op:
+        * Both allocation and removal are done under the graph lock; but
+        * iteration is under RCU-sched; see look_up_lock_class() and
+        * lockdep_free_key_range().
         */
        list_add_tail_rcu(&entry->entry, head);
 
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
                else
                        head = &lock->class->locks_before;
 
-               list_for_each_entry(entry, head, entry) {
+               DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+               list_for_each_entry_rcu(entry, head, entry) {
                        if (!lock_accessed(entry)) {
                                unsigned int cq_depth;
                                mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
         * We can walk it lock-free, because entries only get added
         * to the hash:
         */
-       list_for_each_entry(chain, hash_head, entry) {
+       list_for_each_entry_rcu(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
 cache_hit:
                        debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        if (unlikely(!debug_locks))
                return;
 
-       if (subclass)
+       if (subclass) {
+               unsigned long flags;
+
+               if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+                       return;
+
+               raw_local_irq_save(flags);
+               current->lockdep_recursion = 1;
                register_lock_class(lock, subclass, 1);
+               current->lockdep_recursion = 0;
+               raw_local_irq_restore(flags);
+       }
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
        return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-       struct lock_class *class, *next;
+       struct lock_class *class;
        struct list_head *head;
        unsigned long flags;
        int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
                head = classhash_table + i;
                if (list_empty(head))
                        continue;
-               list_for_each_entry_safe(class, next, head, hash_entry) {
+               list_for_each_entry_rcu(class, head, hash_entry) {
                        if (within(class->key, start, size))
                                zap_class(class);
                        else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
        if (locked)
                graph_unlock();
        raw_local_irq_restore(flags);
+
+       /*
+        * Wait for any possible iterators from look_up_lock_class() to pass
+        * before continuing to free the memory they refer to.
+        *
+        * sync_sched() is sufficient because the read side runs with IRQs disabled.
+        */
+       synchronize_sched();
+
+       /*
+        * XXX at this point we could return the resources to the pool;
+        * instead we leak them. We would need to change to bitmap allocators
+        * instead of the linear allocators we have now.
+        */
 }
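The guarantee being relied on is the usual RCU-sched pairing: the lookup side runs with interrupts (and therefore preemption) disabled while it walks the hash chains, so one synchronize_sched() after unlinking is enough to know that no walker still holds a reference. A generic sketch of that pattern, not lockdep code itself (struct item, items and items_lock are placeholder names):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	int val;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* read side: any region running with preemption or IRQs disabled */
static int reader(void)
{
	struct item *it;
	int sum = 0;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(it, &items, node)
		sum += it->val;
	rcu_read_unlock_sched();
	return sum;
}

/* update side: unpublish under the lock, wait, then free */
static void remove_item(struct item *it)
{
	spin_lock(&items_lock);
	list_del_rcu(&it->node);
	spin_unlock(&items_lock);

	synchronize_sched();	/* all preempt/IRQ-off readers have finished */
	kfree(it);
}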
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-       struct lock_class *class, *next;
+       struct lock_class *class;
        struct list_head *head;
        unsigned long flags;
        int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                head = classhash_table + i;
                if (list_empty(head))
                        continue;
-               list_for_each_entry_safe(class, next, head, hash_entry) {
+               list_for_each_entry_rcu(class, head, hash_entry) {
                        int match = 0;
 
                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
index b3d634ed06c94f1b2ce986293d0cda84078d4a46..99fdf94efce80f432fc4ab203aeb2a918ad5c564 100644 (file)
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
        kfree(mod->args);
        percpu_modfree(mod);
 
-       /* Free lock-classes: */
+       /* Free lock-classes; relies on the preceding sync_rcu(). */
        lockdep_free_key_range(mod->module_core, mod->core_size);
 
        /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
        module_bug_cleanup(mod);
        mutex_unlock(&module_mutex);
 
-       /* Free lock-classes: */
-       lockdep_free_key_range(mod->module_core, mod->core_size);
-
        /* we can't deallocate the module until we clear memory protection */
        unset_module_init_ro_nx(mod);
        unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
        synchronize_rcu();
        mutex_unlock(&module_mutex);
  free_module:
+       /* Free lock-classes; relies on the preceding sync_rcu() */
+       lockdep_free_key_range(mod->module_core, mod->core_size);
+
        module_deallocate(mod, info);
  free_copy:
        free_copy(info);
index f0f831e8a345d835f4cb21bf899c50ab67042b43..62671f53202ac7d4de8037dce950c934c7a4ddbc 100644 (file)
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        } else {
                if (dl_prio(oldprio))
                        p->dl.dl_boosted = 0;
+               if (rt_prio(oldprio))
+                       p->rt.timeout = 0;
                p->sched_class = &fair_sched_class;
        }
 
index 7ce18f3c097ac4779eb4cf6ed0ad14ac1beb3eb5..bcfe32088b3768363c2f37502a953b61a361f7ff 100644 (file)
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p,
        /*
         * If there were no record hinting faults then either the task is
         * completely idle or all activity is in areas that are not of interest
-        * to automatic numa balancing. Scan slower
+        * to automatic numa balancing. Related to that, if there were failed
+        * migrations then it implies we are migrating too quickly or the local
+        * node is overloaded. In either case, scan slower
         */
-       if (local + shared == 0) {
+       if (local + shared == 0 || p->numa_faults_locality[2]) {
                p->numa_scan_period = min(p->numa_scan_period_max,
                        p->numa_scan_period << 1);
 
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
        if (migrated)
                p->numa_pages_migrated += pages;
+       if (flags & TNF_MIGRATE_FAIL)
+               p->numa_faults_locality[2] += pages;
 
        p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
        p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
index 88ea2d6e00314059b96adb0505ffc5f9c98fcf73..ce410bb9f2e103e0fcfda7d7b844948a0a28fbce 100644 (file)
@@ -1227,6 +1227,14 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero,
        },
+       {
+               .procname       = "dirtytime_expire_seconds",
+               .data           = &dirtytime_expire_interval,
+               .maxlen         = sizeof(dirty_expire_interval),
+               .mode           = 0644,
+               .proc_handler   = dirtytime_interval_handler,
+               .extra1         = &zero,
+       },
        {
                .procname       = "nr_pdflush_threads",
                .mode           = 0444 /* read-only */,
index eb682d5c697cd5b67c988654915d1f5333fbe342..6aac4beedbbe235951c0671336e52b2459a047fb 100644 (file)
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
+       int bc_moved;
        /*
         * We try to cancel the timer first. If the callback is on
         * flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
         * restart the timer because we are in the callback, but we
         * can set the expiry time and let the callback return
         * HRTIMER_RESTART.
+        *
+        * Since we are in the idle loop at this point and because
+        * hrtimer_{start/cancel} functions call into tracing,
+        * calls to these functions must be wrapped in RCU_NONIDLE.
         */
-       if (hrtimer_try_to_cancel(&bctimer) >= 0) {
-               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+       RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
+               !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
+                       0);
+       if (bc_moved) {
                /* Bind the "device" to the cpu */
                bc->bound_on = smp_processor_id();
        } else if (bc->bound_on == smp_processor_id()) {
index e97dbd51e7569f6a7ba273227752f2cdbcaebb49..03d7fcb420b5d60c564ad10935011ed8a6556b69 100644 (file)
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
                return 0;
 }
 EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+       unsigned long l = lcm(a, b);
+
+       if (l)
+               return l;
+
+       return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
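The point of lcm_not_zero() is that a zero, i.e. "not configured", operand no longer wipes out the other value the way plain lcm() does when two limits are combined. The semantics are easy to check in userspace (the functions are re-implemented here purely for illustration; the kernel versions live in lib/lcm.c and lib/gcd.c):

#include <assert.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	return (a && b) ? (a / gcd(a, b)) * b : 0;
}

static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);

	return l ? l : (b ? b : a);
}

int main(void)
{
	assert(lcm(0, 512) == 0);		/* a zero operand poisons the result */
	assert(lcm_not_zero(0, 512) == 512);	/* the configured operand survives */
	assert(lcm_not_zero(4096, 512) == 4096);
	return 0;
}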
index 76a1b59523ab05907403f4f9bd5dc547fca1bcab..f5907d23272d48562c69b911e5a0a619e3f4c180 100644 (file)
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
        int minlen = min_t(int, count, nla_len(src));
 
        memcpy(dest, nla_data(src), minlen);
+       if (count > minlen)
+               memset(dest + minlen, 0, count - minlen);
 
        return minlen;
 }
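The added memset() matters because callers routinely copy an attribute payload into a fixed-size destination field; if the payload is shorter than the field, the tail used to keep whatever bytes were there before. A tiny standalone demonstration of the padded behaviour (mirroring the logic rather than calling the kernel function):

#include <assert.h>
#include <string.h>

/* same min/copy/pad logic as the patched nla_memcpy() */
static int copy_padded(void *dest, const void *src, int src_len, int count)
{
	int minlen = src_len < count ? src_len : count;

	memcpy(dest, src, minlen);
	if (count > minlen)
		memset((char *)dest + minlen, 0, count - minlen);
	return minlen;
}

int main(void)
{
	unsigned char dst[8];
	const unsigned char attr[4] = { 1, 2, 3, 4 };	/* short attribute payload */

	memset(dst, 0xff, sizeof(dst));		/* pretend dst holds stale data */
	copy_padded(dst, attr, sizeof(attr), sizeof(dst));

	assert(dst[3] == 4);
	assert(dst[4] == 0 && dst[7] == 0);	/* tail is now zeroed */
	return 0;
}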
index a42a0d44e8181cbcac4b292b5837e2e0f463c067..b2957540d3c722d5c7b3d9f94e5b5a1d6c9d7975 100644 (file)
@@ -44,7 +44,6 @@ static const struct rhashtable_params test_rht_params = {
        .key_offset = offsetof(struct test_obj, value),
        .key_len = sizeof(int),
        .hashfn = jhash,
-       .max_size = 2, /* we expand/shrink manually here */
        .nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
index 626e93db28ba162d11e7d286985604bbc523981c..6817b0350c71c43b0f89a4972ce19c51a2408a2b 100644 (file)
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int target_nid, last_cpupid = -1;
        bool page_locked;
        bool migrated = false;
+       bool was_writable;
        int flags = 0;
 
        /* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                flags |= TNF_FAULT_LOCAL;
        }
 
-       /*
-        * Avoid grouping on DSO/COW pages in specific and RO pages
-        * in general, RO pages shouldn't hurt as much anyway since
-        * they can be in shared cache state.
-        *
-        * FIXME! This checks "pmd_dirty()" as an approximation of
-        * "is this a read-only page", since checking "pmd_write()"
-        * is even more broken. We haven't actually turned this into
-        * a writable page, so pmd_write() will always be false.
-        */
-       if (!pmd_dirty(pmd))
+       /* See similar comment in do_numa_page for explanation */
+       if (!(vma->vm_flags & VM_WRITE))
                flags |= TNF_NO_GROUP;
 
        /*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (migrated) {
                flags |= TNF_MIGRATED;
                page_nid = target_nid;
-       }
+       } else
+               flags |= TNF_MIGRATE_FAIL;
 
        goto out;
 clear_pmdnuma:
        BUG_ON(!PageLocked(page));
+       was_writable = pmd_write(pmd);
        pmd = pmd_modify(pmd, vma->vm_page_prot);
+       pmd = pmd_mkyoung(pmd);
+       if (was_writable)
+               pmd = pmd_mkwrite(pmd);
        set_pmd_at(mm, haddr, pmdp, pmd);
        update_mmu_cache_pmd(vma, addr, pmdp);
        unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                pmd_t entry;
+               bool preserve_write = prot_numa && pmd_write(*pmd);
                ret = 1;
 
                /*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                if (!prot_numa || !pmd_protnone(*pmd)) {
                        entry = pmdp_get_and_clear_notify(mm, addr, pmd);
                        entry = pmd_modify(entry, newprot);
+                       if (preserve_write)
+                               entry = pmd_mkwrite(entry);
                        ret = HPAGE_PMD_NR;
                        set_pmd_at(mm, addr, pmd, entry);
-                       BUG_ON(pmd_write(entry));
+                       BUG_ON(!preserve_write && pmd_write(entry));
                }
                spin_unlock(ptl);
        }
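
Both NUMA-hinting paths above use the same save-and-restore idiom: remember whether the entry was writable before the protection change, then re-apply the write bit afterwards so a later write does not take a spurious fault. A tiny userspace sketch of the idiom on a plain flags word (names are illustrative, not kernel APIs):

    #include <assert.h>
    #include <stdbool.h>

    #define FLAG_WRITE    0x1u
    #define FLAG_PROTNONE 0x2u

    /* Apply a new protection but keep the write bit if it was already set. */
    static unsigned int change_prot(unsigned int entry, unsigned int newprot)
    {
            bool preserve_write = entry & FLAG_WRITE;

            entry = newprot;                /* pmd_modify()/pte_modify() analogue */
            if (preserve_write)
                    entry |= FLAG_WRITE;    /* pmd_mkwrite()/pte_mkwrite() analogue */
            return entry;
    }

    int main(void)
    {
            assert(change_prot(FLAG_WRITE, FLAG_PROTNONE) ==
                   (FLAG_PROTNONE | FLAG_WRITE));
            assert(change_prot(0, FLAG_PROTNONE) == FLAG_PROTNONE);
            return 0;
    }
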
index 411144f977b10eab492410728784efe37c4ea54a..97839f5c8c303df324a1cec1dfacadb1b0bfa04c 100644 (file)
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int last_cpupid;
        int target_nid;
        bool migrated = false;
+       bool was_writable = pte_write(pte);
        int flags = 0;
 
        /* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /* Make it present again */
        pte = pte_modify(pte, vma->vm_page_prot);
        pte = pte_mkyoung(pte);
+       if (was_writable)
+               pte = pte_mkwrite(pte);
        set_pte_at(mm, addr, ptep, pte);
        update_mmu_cache(vma, addr, ptep);
 
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /*
-        * Avoid grouping on DSO/COW pages in specific and RO pages
-        * in general, RO pages shouldn't hurt as much anyway since
-        * they can be in shared cache state.
-        *
-        * FIXME! This checks "pmd_dirty()" as an approximation of
-        * "is this a read-only page", since checking "pmd_write()"
-        * is even more broken. We haven't actually turned this into
-        * a writable page, so pmd_write() will always be false.
+        * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+        * much anyway since they can be in shared cache state. This misses
+        * the case where a mapping is writable but the process never writes
+        * to it but pte_write gets cleared during protection updates and
+        * pte_dirty has unpredictable behaviour between PTE scan updates,
+        * background writeback, dirty balancing and application behaviour.
         */
-       if (!pte_dirty(pte))
+       if (!(vma->vm_flags & VM_WRITE))
                flags |= TNF_NO_GROUP;
 
        /*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (migrated) {
                page_nid = target_nid;
                flags |= TNF_MIGRATED;
-       }
+       } else
+               flags |= TNF_MIGRATE_FAIL;
 
 out:
        if (page_nid != -1)
index 9fab10795beabd723c29722a442ace37c349c66b..65842d688b7c9bb5dbfd2440fc07339fa94650f4 100644 (file)
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
                        return NULL;
 
                arch_refresh_nodedata(nid, pgdat);
+       } else {
+               /* Reset the nr_zones and classzone_idx to 0 before reuse */
+               pgdat->nr_zones = 0;
+               pgdat->classzone_idx = 0;
        }
 
        /* we can use NODE_DATA(nid) from here */
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
                if (is_vmalloc_addr(zone->wait_table))
                        vfree(zone->wait_table);
        }
-
-       /*
-        * Since there is no way to guarentee the address of pgdat/zone is not
-        * on stack of any kernel threads or used by other kernel objects
-        * without reference counting or other symchronizing method, do not
-        * reset node_data and free pgdat here. Just reset it to 0 and reuse
-        * the memory when the node is online again.
-        */
-       memset(pgdat, 0, sizeof(*pgdat));
 }
 EXPORT_SYMBOL(try_offline_node);
 
index da9990acc08b2d8014e342771a52650b47ee05fc..9ec50a368634a8d9a0824504d479b03798bdc402 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -774,10 +774,8 @@ again:                     remove_next = 1 + (end > next->vm_end);
 
                        importer->anon_vma = exporter->anon_vma;
                        error = anon_vma_clone(importer, exporter);
-                       if (error) {
-                               importer->anon_vma = NULL;
+                       if (error)
                                return error;
-                       }
                }
        }
 
index 44727811bf4cf62e3579261ee9699a37fab78b3d..88584838e7046bec724d68c0cafcd94eec65a040 100644 (file)
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
+                       bool preserve_write = prot_numa && pte_write(oldpte);
 
                        /*
                         * Avoid trapping faults against the zero or KSM
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
+                       if (preserve_write)
+                               ptent = pte_mkwrite(ptent);
 
                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
index 45e187b2d97183a90df9a5ee8558404f9f1bd826..644bcb665773f6e53f50fe6599429595b506f0c5 100644 (file)
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
         *                   bw * elapsed + write_bandwidth * (period - elapsed)
         * write_bandwidth = ---------------------------------------------------
         *                                          period
+        *
+        * @written may have decreased due to account_page_redirty().
+        * Avoid underflowing @bw calculation.
         */
-       bw = written - bdi->written_stamp;
+       bw = written - min(written, bdi->written_stamp);
        bw *= HZ;
        if (unlikely(elapsed > period)) {
                do_div(bw, elapsed);
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
                                    unsigned long now)
 {
        static DEFINE_SPINLOCK(dirty_lock);
-       static unsigned long update_time;
+       static unsigned long update_time = INITIAL_JIFFIES;
 
        /*
         * check locklessly first to optimize away locking for the most time
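
Because written and bdi->written_stamp are unsigned, a counter that moved "backwards" (due to account_page_redirty()) would wrap the subtraction to a huge value. Clamping with min() makes the delta saturate at zero instead. A self-contained demonstration of the guard:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long written = 100, stamp = 120;       /* counter decreased */

            unsigned long raw     = written - stamp;                /* wraps around */
            unsigned long clamped = written - min(written, stamp);  /* 0 */

            printf("raw=%lu clamped=%lu\n", raw, clamped);
            return 0;
    }
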
index 72f5ac381ab3253b6016583e0618be6f3e91367c..755a42c76eb4747623da51acdeb780b322b5ac06 100644 (file)
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 
                        if (!is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
+                               kernel_map_pages(page, (1 << order), 1);
                                set_page_refcounted(page);
                                isolated_page = page;
                        }
index 75c1f2878519171139ae2d0b6f1e24809067ab4a..29f2f8b853ae51be4f9e35fbc1495ad69297ff82 100644 (file)
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end,
                        vma = vma->vm_next;
 
                        err = walk_page_test(start, next, walk);
-                       if (err > 0)
+                       if (err > 0) {
+                               /*
+                                * positive return values are purely for
+                                * controlling the pagewalk, so should never
+                                * be passed to the callers.
+                                */
+                               err = 0;
                                continue;
+                       }
                        if (err < 0)
                                break;
                }
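
The callback convention enforced here is: negative return values are errors and abort the walk, zero means continue, and positive values are private control signals (skip this range) that must never leak back to the caller. A minimal sketch of the same convention with an illustrative walker and callback:

    #include <stdio.h>

    /* >0: skip this item, 0: handled, <0: abort with error */
    static int test_cb(int item)
    {
            if (item % 2)
                    return 1;       /* skip odd items */
            if (item == 8)
                    return -1;      /* simulated failure */
            printf("visit %d\n", item);
            return 0;
    }

    static int walk_range(int start, int end)
    {
            int err = 0;

            for (int i = start; i < end; i++) {
                    err = test_cb(i);
                    if (err > 0) {
                            err = 0;        /* control value, never returned */
                            continue;
                    }
                    if (err < 0)
                            break;
            }
            return err;                     /* only 0 or a negative error escapes */
    }

    int main(void)
    {
            return walk_range(0, 10) ? 1 : 0;
    }
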
index 5e3e09081164b83814683513c174445e67669a4b..c161a14b6a8fb127150678f18582ad6f0a29f55d 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
        return 0;
 
  enomem_failure:
+       /*
+        * dst->anon_vma is dropped here otherwise its degree can be incorrectly
+        * decremented in unlink_anon_vmas().
+        * We can safely do this because callers of anon_vma_clone() don't care
+        * about dst->anon_vma if anon_vma_clone() failed.
+        */
+       dst->anon_vma = NULL;
        unlink_anon_vmas(dst);
        return -ENOMEM;
 }
index 6832c4eab104d15ff3d3bd907c92591540facf01..82c473780c9188ecf7bfc393703f47793a2fbfe9 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ redo:
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
-       } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+       } while (IS_ENABLED(CONFIG_PREEMPT) &&
+                unlikely(tid != READ_ONCE(c->tid)));
 
        /*
         * Irqless object alloc/free algorithm used here depends on sequence
@@ -2718,7 +2719,8 @@ redo:
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
-       } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+       } while (IS_ENABLED(CONFIG_PREEMPT) &&
+                unlikely(tid != READ_ONCE(c->tid)));
 
        /* Same with comment on barrier() in slab_alloc_node() */
        barrier();
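
Without READ_ONCE(), the compiler may reload or cache c->tid across the comparison, weakening the tid/cpu_slab consistency re-check when the task is preempted between the two reads. A userspace illustration of the annotation, using a simplified macro rather than the kernel's full definition:

    #include <stdio.h>

    /* Simplified: force a single load of x that the compiler cannot cache. */
    #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    struct cpu_slab { unsigned long tid; };

    static struct cpu_slab slab;

    static void retry_until_consistent(void)
    {
            unsigned long tid;
            struct cpu_slab *c;

            do {
                    tid = READ_ONCE(slab.tid);      /* snapshot */
                    c = &slab;                      /* raw_cpu_ptr() analogue */
                    /* preemption could migrate us to another CPU here */
            } while (tid != READ_ONCE(c->tid));     /* fresh load, not a cached value */

            printf("consistent tid=%lu\n", tid);
    }

    int main(void)
    {
            slab.tid = 42;
            retry_until_consistent();
            return 0;
    }
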
index 8b5ab9033b418606ecef53cbb9dcc91669e9d752..01d7ba840df8dbf48b07e3c8697bb7c11f424a8d 100644 (file)
@@ -538,7 +538,6 @@ static int vlan_dev_init(struct net_device *dev)
        /* IFF_BROADCAST|IFF_MULTICAST; ??? */
        dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
                                          IFF_MASTER | IFF_SLAVE);
-       dev->iflink = real_dev->ifindex;
        dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
                                          (1<<__LINK_STATE_DORMANT))) |
                      (1<<__LINK_STATE_PRESENT);
@@ -733,6 +732,13 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+       return real_dev->ifindex;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
        .get_settings           = vlan_ethtool_get_settings,
        .get_drvinfo            = vlan_ethtool_get_drvinfo,
@@ -769,6 +775,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
        .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
+       .ndo_get_iflink         = vlan_dev_get_iflink,
 };
 
 static void vlan_dev_free(struct net_device *dev)
index fbda6b54baffccf798375cb8add49bb179738386..baf1f9843f2c42a78c7df31b0c60b3012d7fe22d 100644 (file)
@@ -83,11 +83,12 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
+       if (dev_get_iflink(net_dev) == 0 ||
+           dev_get_iflink(net_dev) == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
-       parent_dev = __dev_get_by_index(&init_net, net_dev->iflink);
+       parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
        /* if we got a NULL parent_dev there is something broken.. */
        if (WARN(!parent_dev, "Cannot find parent device"))
                return false;
index 5a5b16f365e9baae89f345b22930f1109e5f0b4c..40854c99bc1ecff42e8943ac506b77155ebc4fe1 100644 (file)
@@ -111,6 +111,10 @@ struct bnep_ext_hdr {
 #define BNEPCONNDEL    _IOW('B', 201, int)
 #define BNEPGETCONNLIST        _IOR('B', 210, int)
 #define BNEPGETCONNINFO        _IOR('B', 211, int)
+#define BNEPGETSUPPFEAT        _IOR('B', 212, int)
+
+#define BNEP_SETUP_RESPONSE    0
+#define BNEP_SETUP_RSP_SENT    10
 
 struct bnep_connadd_req {
        int   sock;             /* Connected socket */
index 05f57e491ccbd614a1d306c49df891e4a2ec00c6..1641367e54cadb461903e554c39fabf05997c9de 100644 (file)
@@ -231,7 +231,14 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
                break;
 
        case BNEP_SETUP_CONN_REQ:
-               err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED);
+               /* Successful response should be sent only once */
+               if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) &&
+                   !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags))
+                       err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+                                           BNEP_SUCCESS);
+               else
+                       err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+                                           BNEP_CONN_NOT_ALLOWED);
                break;
 
        default: {
@@ -239,7 +246,7 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
                        pkt[0] = BNEP_CONTROL;
                        pkt[1] = BNEP_CMD_NOT_UNDERSTOOD;
                        pkt[2] = cmd;
-                       bnep_send(s, pkt, sizeof(pkt));
+                       err = bnep_send(s, pkt, sizeof(pkt));
                }
                break;
        }
@@ -292,29 +299,55 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
        struct net_device *dev = s->dev;
        struct sk_buff *nskb;
-       u8 type;
+       u8 type, ctrl_type;
 
        dev->stats.rx_bytes += skb->len;
 
        type = *(u8 *) skb->data;
        skb_pull(skb, 1);
+       ctrl_type = *(u8 *)skb->data;
 
        if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
                goto badframe;
 
        if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
-               bnep_rx_control(s, skb->data, skb->len);
-               kfree_skb(skb);
-               return 0;
-       }
+               if (bnep_rx_control(s, skb->data, skb->len) < 0) {
+                       dev->stats.tx_errors++;
+                       kfree_skb(skb);
+                       return 0;
+               }
 
-       skb_reset_mac_header(skb);
+               if (!(type & BNEP_EXT_HEADER)) {
+                       kfree_skb(skb);
+                       return 0;
+               }
 
-       /* Verify and pull out header */
-       if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
-               goto badframe;
+               /* Verify and pull ctrl message since it's already processed */
+               switch (ctrl_type) {
+               case BNEP_SETUP_CONN_REQ:
+                       /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */
+                       if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2))
+                               goto badframe;
+                       break;
+               case BNEP_FILTER_MULTI_ADDR_SET:
+               case BNEP_FILTER_NET_TYPE_SET:
+                       /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */
+                       if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2))
+                               goto badframe;
+                       break;
+               default:
+                       kfree_skb(skb);
+                       return 0;
+               }
+       } else {
+               skb_reset_mac_header(skb);
 
-       s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
+               /* Verify and pull out header */
+               if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
+                       goto badframe;
+
+               s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
+       }
 
        if (type & BNEP_EXT_HEADER) {
                if (bnep_rx_extension(s, skb) < 0)
@@ -525,6 +558,7 @@ static struct device_type bnep_type = {
 
 int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 {
+       u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
        struct net_device *dev;
        struct bnep_session *s, *ss;
        u8 dst[ETH_ALEN], src[ETH_ALEN];
@@ -535,6 +569,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
        if (!l2cap_is_socket(sock))
                return -EBADFD;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
        baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
 
@@ -566,6 +603,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
        s->sock  = sock;
        s->role  = req->role;
        s->state = BT_CONNECTED;
+       s->flags = req->flags;
 
        s->msg.msg_flags = MSG_NOSIGNAL;
 
@@ -611,11 +649,15 @@ failed:
 
 int bnep_del_connection(struct bnep_conndel_req *req)
 {
+       u32 valid_flags = 0;
        struct bnep_session *s;
        int  err = 0;
 
        BT_DBG("");
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        down_read(&bnep_session_sem);
 
        s = __bnep_get_session(req->dst);
@@ -631,10 +673,12 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 
 static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
 {
+       u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
+
        memset(ci, 0, sizeof(*ci));
        memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
        strcpy(ci->device, s->dev->name);
-       ci->flags = s->flags;
+       ci->flags = s->flags & valid_flags;
        ci->state = s->state;
        ci->role  = s->role;
 }
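
The recurring pattern in these BNEP/CMTP/HIDP changes is a whitelist of userspace-visible flags: reject any ioctl request carrying unknown bits, and mask the same set when reporting flags back. A standalone sketch of the pattern (constants and function names are illustrative, not the Bluetooth API):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)          (1UL << (n))
    #define SETUP_RESPONSE  0               /* illustrative flag bit */

    static int add_connection(uint32_t req_flags, uint32_t *session_flags)
    {
            uint32_t valid_flags = BIT(SETUP_RESPONSE);

            if (req_flags & ~valid_flags)   /* unknown bits -> refuse */
                    return -EINVAL;

            *session_flags = req_flags;
            return 0;
    }

    static uint32_t report_flags(uint32_t session_flags)
    {
            uint32_t valid_flags = BIT(SETUP_RESPONSE);

            return session_flags & valid_flags;     /* never leak internal bits */
    }

    int main(void)
    {
            uint32_t flags = 0;

            printf("%d\n", add_connection(BIT(SETUP_RESPONSE), &flags)); /* 0 */
            printf("%d\n", add_connection(BIT(7), &flags));              /* -EINVAL */
            printf("0x%x\n", report_flags(flags));
            return 0;
    }
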
index 5f051290dabab83ec76422dbb7cd44732b22eb6e..bde2bdd9e929e854c9e2d001a7bf9e6a364d6a2c 100644 (file)
@@ -57,6 +57,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
        struct bnep_conninfo ci;
        struct socket *nsock;
        void __user *argp = (void __user *)arg;
+       __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
        int err;
 
        BT_DBG("cmd %x arg %lx", cmd, arg);
@@ -120,6 +121,12 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 
                return err;
 
+       case BNEPGETSUPPFEAT:
+               if (copy_to_user(argp, &supp_feat, sizeof(supp_feat)))
+                       return -EFAULT;
+
+               return 0;
+
        default:
                return -EINVAL;
        }
index 75bd2c42e3e791024abf9d4014fbc41d12dea0da..b0c6c6af76ef07c311ea940d482b3d45ab83696d 100644 (file)
@@ -333,7 +333,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
                return;
        }
 
-       if (session->flags & (1 << CMTP_LOOPBACK)) {
+       if (session->flags & BIT(CMTP_LOOPBACK)) {
                kfree_skb(skb);
                return;
        }
index 278a194e6af488f67197c3725ca937f554989498..298ed37010e691a6f2bb72c506b6e50f9e8676f9 100644 (file)
@@ -75,10 +75,11 @@ static void __cmtp_unlink_session(struct cmtp_session *session)
 
 static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
 {
+       u32 valid_flags = BIT(CMTP_LOOPBACK);
        memset(ci, 0, sizeof(*ci));
        bacpy(&ci->bdaddr, &session->bdaddr);
 
-       ci->flags = session->flags;
+       ci->flags = session->flags & valid_flags;
        ci->state = session->state;
 
        ci->num = session->num;
@@ -313,7 +314,7 @@ static int cmtp_session(void *arg)
 
        down_write(&cmtp_session_sem);
 
-       if (!(session->flags & (1 << CMTP_LOOPBACK)))
+       if (!(session->flags & BIT(CMTP_LOOPBACK)))
                cmtp_detach_device(session);
 
        fput(session->sock->file);
@@ -329,6 +330,7 @@ static int cmtp_session(void *arg)
 
 int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 {
+       u32 valid_flags = BIT(CMTP_LOOPBACK);
        struct cmtp_session *session, *s;
        int i, err;
 
@@ -337,6 +339,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
        if (!l2cap_is_socket(sock))
                return -EBADFD;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
        if (!session)
                return -ENOMEM;
@@ -385,7 +390,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
                goto unlink;
        }
 
-       if (!(session->flags & (1 << CMTP_LOOPBACK))) {
+       if (!(session->flags & BIT(CMTP_LOOPBACK))) {
                err = cmtp_attach_device(session);
                if (err < 0) {
                        atomic_inc(&session->terminate);
@@ -409,11 +414,15 @@ failed:
 
 int cmtp_del_connection(struct cmtp_conndel_req *req)
 {
+       u32 valid_flags = 0;
        struct cmtp_session *session;
        int err = 0;
 
        BT_DBG("");
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        down_read(&cmtp_session_sem);
 
        session = __cmtp_get_session(&req->bdaddr);
index e6bfeb7b4415c485d928c7675ea6765d41dabdf2..476709bd068a474f7edcac83a4869849ccfb4b17 100644 (file)
@@ -141,13 +141,16 @@ static const struct file_operations dut_mode_fops = {
 
 /* ---- HCI requests ---- */
 
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+                                 struct sk_buff *skb)
 {
        BT_DBG("%s result 0x%2.2x", hdev->name, result);
 
        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
+               if (skb)
+                       hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
 }
@@ -163,66 +166,12 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
        }
 }
 
-static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
-                                           u8 event)
-{
-       struct hci_ev_cmd_complete *ev;
-       struct hci_event_hdr *hdr;
-       struct sk_buff *skb;
-
-       hci_dev_lock(hdev);
-
-       skb = hdev->recv_evt;
-       hdev->recv_evt = NULL;
-
-       hci_dev_unlock(hdev);
-
-       if (!skb)
-               return ERR_PTR(-ENODATA);
-
-       if (skb->len < sizeof(*hdr)) {
-               BT_ERR("Too short HCI event");
-               goto failed;
-       }
-
-       hdr = (void *) skb->data;
-       skb_pull(skb, HCI_EVENT_HDR_SIZE);
-
-       if (event) {
-               if (hdr->evt != event)
-                       goto failed;
-               return skb;
-       }
-
-       if (hdr->evt != HCI_EV_CMD_COMPLETE) {
-               BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
-               goto failed;
-       }
-
-       if (skb->len < sizeof(*ev)) {
-               BT_ERR("Too short cmd_complete event");
-               goto failed;
-       }
-
-       ev = (void *) skb->data;
-       skb_pull(skb, sizeof(*ev));
-
-       if (opcode == __le16_to_cpu(ev->opcode))
-               return skb;
-
-       BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
-              __le16_to_cpu(ev->opcode));
-
-failed:
-       kfree_skb(skb);
-       return ERR_PTR(-ENODATA);
-}
-
 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
 {
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
+       struct sk_buff *skb;
        int err = 0;
 
        BT_DBG("%s", hdev->name);
@@ -236,7 +185,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
 
-       err = hci_req_run(&req, hci_req_sync_complete);
+       err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
@@ -265,13 +214,20 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
        }
 
        hdev->req_status = hdev->req_result = 0;
+       skb = hdev->req_skb;
+       hdev->req_skb = NULL;
 
        BT_DBG("%s end: err %d", hdev->name, err);
 
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return ERR_PTR(err);
+       }
 
-       return hci_get_cmd_complete(hdev, opcode, event);
+       if (!skb)
+               return ERR_PTR(-ENODATA);
+
+       return skb;
 }
 EXPORT_SYMBOL(__hci_cmd_sync_ev);
 
@@ -303,7 +259,7 @@ static int __hci_req_sync(struct hci_dev *hdev,
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
 
-       err = hci_req_run(&req, hci_req_sync_complete);
+       err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;
 
@@ -1690,9 +1646,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
                hdev->sent_cmd = NULL;
        }
 
-       kfree_skb(hdev->recv_evt);
-       hdev->recv_evt = NULL;
-
        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);
@@ -3247,7 +3200,7 @@ EXPORT_SYMBOL(hci_register_dev);
 /* Unregister HCI device */
 void hci_unregister_dev(struct hci_dev *hdev)
 {
-       int i, id;
+       int id;
 
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
@@ -3261,9 +3214,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
 
        hci_dev_do_close(hdev);
 
-       for (i = 0; i < NUM_REASSEMBLY; i++)
-               kfree_skb(hdev->reassembly[i]);
-
        cancel_work_sync(&hdev->power_on);
 
        if (!test_bit(HCI_INIT, &hdev->flags) &&
@@ -3367,149 +3317,6 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(hci_recv_frame);
 
-static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
-                         int count, __u8 index)
-{
-       int len = 0;
-       int hlen = 0;
-       int remain = count;
-       struct sk_buff *skb;
-       struct bt_skb_cb *scb;
-
-       if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
-           index >= NUM_REASSEMBLY)
-               return -EILSEQ;
-
-       skb = hdev->reassembly[index];
-
-       if (!skb) {
-               switch (type) {
-               case HCI_ACLDATA_PKT:
-                       len = HCI_MAX_FRAME_SIZE;
-                       hlen = HCI_ACL_HDR_SIZE;
-                       break;
-               case HCI_EVENT_PKT:
-                       len = HCI_MAX_EVENT_SIZE;
-                       hlen = HCI_EVENT_HDR_SIZE;
-                       break;
-               case HCI_SCODATA_PKT:
-                       len = HCI_MAX_SCO_SIZE;
-                       hlen = HCI_SCO_HDR_SIZE;
-                       break;
-               }
-
-               skb = bt_skb_alloc(len, GFP_ATOMIC);
-               if (!skb)
-                       return -ENOMEM;
-
-               scb = (void *) skb->cb;
-               scb->expect = hlen;
-               scb->pkt_type = type;
-
-               hdev->reassembly[index] = skb;
-       }
-
-       while (count) {
-               scb = (void *) skb->cb;
-               len = min_t(uint, scb->expect, count);
-
-               memcpy(skb_put(skb, len), data, len);
-
-               count -= len;
-               data += len;
-               scb->expect -= len;
-               remain = count;
-
-               switch (type) {
-               case HCI_EVENT_PKT:
-                       if (skb->len == HCI_EVENT_HDR_SIZE) {
-                               struct hci_event_hdr *h = hci_event_hdr(skb);
-                               scb->expect = h->plen;
-
-                               if (skb_tailroom(skb) < scb->expect) {
-                                       kfree_skb(skb);
-                                       hdev->reassembly[index] = NULL;
-                                       return -ENOMEM;
-                               }
-                       }
-                       break;
-
-               case HCI_ACLDATA_PKT:
-                       if (skb->len  == HCI_ACL_HDR_SIZE) {
-                               struct hci_acl_hdr *h = hci_acl_hdr(skb);
-                               scb->expect = __le16_to_cpu(h->dlen);
-
-                               if (skb_tailroom(skb) < scb->expect) {
-                                       kfree_skb(skb);
-                                       hdev->reassembly[index] = NULL;
-                                       return -ENOMEM;
-                               }
-                       }
-                       break;
-
-               case HCI_SCODATA_PKT:
-                       if (skb->len == HCI_SCO_HDR_SIZE) {
-                               struct hci_sco_hdr *h = hci_sco_hdr(skb);
-                               scb->expect = h->dlen;
-
-                               if (skb_tailroom(skb) < scb->expect) {
-                                       kfree_skb(skb);
-                                       hdev->reassembly[index] = NULL;
-                                       return -ENOMEM;
-                               }
-                       }
-                       break;
-               }
-
-               if (scb->expect == 0) {
-                       /* Complete frame */
-
-                       bt_cb(skb)->pkt_type = type;
-                       hci_recv_frame(hdev, skb);
-
-                       hdev->reassembly[index] = NULL;
-                       return remain;
-               }
-       }
-
-       return remain;
-}
-
-#define STREAM_REASSEMBLY 0
-
-int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
-{
-       int type;
-       int rem = 0;
-
-       while (count) {
-               struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
-
-               if (!skb) {
-                       struct { char type; } *pkt;
-
-                       /* Start of the frame */
-                       pkt = data;
-                       type = pkt->type;
-
-                       data++;
-                       count--;
-               } else
-                       type = bt_cb(skb)->pkt_type;
-
-               rem = hci_reassembly(hdev, type, data, count,
-                                    STREAM_REASSEMBLY);
-               if (rem < 0)
-                       return rem;
-
-               data += (count - rem);
-               count = rem;
-       }
-
-       return rem;
-}
-EXPORT_SYMBOL(hci_recv_stream_fragment);
-
 /* ---- Interface to upper protocols ---- */
 
 int hci_register_cb(struct hci_cb *cb)
@@ -3563,11 +3370,6 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
-bool hci_req_pending(struct hci_dev *hdev)
-{
-       return (hdev->req_status == HCI_REQ_PEND);
-}
-
 /* Send HCI command */
 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
@@ -3585,7 +3387,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
         */
-       bt_cb(skb)->req_start = 1;
+       bt_cb(skb)->req.start = true;
 
        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -4263,7 +4065,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev)
        if (!skb)
                return true;
 
-       return bt_cb(skb)->req_start;
+       return bt_cb(skb)->req.start;
 }
 
 static void hci_resend_last(struct hci_dev *hdev)
@@ -4288,9 +4090,10 @@ static void hci_resend_last(struct hci_dev *hdev)
        queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+                         hci_req_complete_t *req_complete,
+                         hci_req_complete_skb_t *req_complete_skb)
 {
-       hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;
 
@@ -4322,36 +4125,29 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
-       if (hdev->sent_cmd) {
-               req_complete = bt_cb(hdev->sent_cmd)->req_complete;
-
-               if (req_complete) {
-                       /* We must set the complete callback to NULL to
-                        * avoid calling the callback more than once if
-                        * this function gets called again.
-                        */
-                       bt_cb(hdev->sent_cmd)->req_complete = NULL;
+       if (bt_cb(hdev->sent_cmd)->req.complete) {
+               *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
+               return;
+       }
 
-                       goto call_complete;
-               }
+       if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
+               *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
+               return;
        }
 
        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
-               if (bt_cb(skb)->req_start) {
+               if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }
 
-               req_complete = bt_cb(skb)->req_complete;
+               *req_complete = bt_cb(skb)->req.complete;
+               *req_complete_skb = bt_cb(skb)->req.complete_skb;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
-call_complete:
-       if (req_complete)
-               req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
 }
 
 static void hci_rx_work(struct work_struct *work)
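
With the rework above, __hci_cmd_sync_ev() hands back the command-complete skb captured by hci_req_sync_complete() instead of fishing it out of hdev->recv_evt afterwards. A hedged sketch of how a caller consumes the result; the opcode, timeout, and response struct are existing Bluetooth-core identifiers, but the surrounding function is illustrative kernel-context code, not a standalone program:

    static int read_local_version(struct hci_dev *hdev)
    {
            struct hci_rp_read_local_version *rp;
            struct sk_buff *skb;

            skb = __hci_cmd_sync_ev(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                                    0 /* wait for command complete */,
                                    HCI_INIT_TIMEOUT);
            if (IS_ERR(skb))
                    return PTR_ERR(skb);    /* e.g. -ENODATA if no skb was captured */

            rp = (void *)skb->data;         /* headers already pulled */
            BT_DBG("hci_ver %u manufacturer %u", rp->hci_ver,
                   __le16_to_cpu(rp->manufacturer));

            kfree_skb(skb);                 /* caller owns the returned skb */
            return 0;
    }
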
index e6255833a2582e7f4deeed9b138b9390ea726095..7db4220941cc60dfed6c5ca4043e7158e4458387 100644 (file)
@@ -114,6 +114,30 @@ static const struct file_operations features_fops = {
        .release        = single_release,
 };
 
+static int device_id_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source,
+                 hdev->devid_vendor, hdev->devid_product, hdev->devid_version);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int device_id_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, device_id_show, inode->i_private);
+}
+
+static const struct file_operations device_id_fops = {
+       .open           = device_id_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int device_list_show(struct seq_file *f, void *ptr)
 {
        struct hci_dev *hdev = f->private;
@@ -335,6 +359,8 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
                          &hdev->hw_error_code);
+       debugfs_create_file("device_id", 0444, hdev->debugfs, hdev,
+                           &device_id_fops);
 
        debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
                            &device_list_fops);
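
The new entry is read-only (mode 0444) and formatted as source:vendor:product:version. Assuming debugfs is mounted at /sys/kernel/debug and an hci0 adapter exists, it could be read from userspace as below; the path is an assumption based on the usual bluetooth/hciX debugfs layout:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical path; depends on debugfs mount point and adapter name. */
            FILE *f = fopen("/sys/kernel/debug/bluetooth/hci0/device_id", "r");
            char line[64];

            if (!f)
                    return 1;
            if (fgets(line, sizeof(line), f))
                    printf("device_id: %s", line);
            fclose(f);
            return 0;
    }
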
index 62f92a50896120d72ed77cd677768acdce6ba36e..7b61be73650fe574bd655c19ae79f8b4d8a1b877 100644 (file)
@@ -1045,11 +1045,6 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
        struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-       hci_dev_lock(hdev);
-       mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
-                                         rp->status);
-       hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
@@ -1058,15 +1053,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
        struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-       hci_dev_lock(hdev);
-       mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
-                                         rp->hash256, rp->rand256,
-                                         rp->status);
-       hci_dev_unlock(hdev);
 }
 
-
 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -2048,6 +2036,33 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
+{
+       struct hci_cp_le_read_remote_features *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (!status)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+       if (conn) {
+               if (conn->state == BT_CONFIG) {
+                       hci_connect_cfm(conn, status);
+                       hci_conn_drop(conn);
+               }
+       }
+
+       hci_dev_unlock(hdev);
+}
+
 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
 {
        struct hci_cp_le_start_enc *cp;
@@ -2732,17 +2747,19 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
-static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+                                u16 *opcode, u8 *status,
+                                hci_req_complete_t *req_complete,
+                                hci_req_complete_skb_t *req_complete_skb)
 {
        struct hci_ev_cmd_complete *ev = (void *) skb->data;
-       u8 status = skb->data[sizeof(*ev)];
-       __u16 opcode;
 
-       skb_pull(skb, sizeof(*ev));
+       *opcode = __le16_to_cpu(ev->opcode);
+       *status = skb->data[sizeof(*ev)];
 
-       opcode = __le16_to_cpu(ev->opcode);
+       skb_pull(skb, sizeof(*ev));
 
-       switch (opcode) {
+       switch (*opcode) {
        case HCI_OP_INQUIRY_CANCEL:
                hci_cc_inquiry_cancel(hdev, skb);
                break;
@@ -3020,32 +3037,36 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                break;
 
        default:
-               BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+               BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
                break;
        }
 
-       if (opcode != HCI_OP_NOP)
+       if (*opcode != HCI_OP_NOP)
                cancel_delayed_work(&hdev->cmd_timer);
 
-       hci_req_cmd_complete(hdev, opcode, status);
-
-       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
                atomic_set(&hdev->cmd_cnt, 1);
-               if (!skb_queue_empty(&hdev->cmd_q))
-                       queue_work(hdev->workqueue, &hdev->cmd_work);
-       }
+
+       hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
+                            req_complete_skb);
+
+       if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+               queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
-static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
+                              u16 *opcode, u8 *status,
+                              hci_req_complete_t *req_complete,
+                              hci_req_complete_skb_t *req_complete_skb)
 {
        struct hci_ev_cmd_status *ev = (void *) skb->data;
-       __u16 opcode;
 
        skb_pull(skb, sizeof(*ev));
 
-       opcode = __le16_to_cpu(ev->opcode);
+       *opcode = __le16_to_cpu(ev->opcode);
+       *status = ev->status;
 
-       switch (opcode) {
+       switch (*opcode) {
        case HCI_OP_INQUIRY:
                hci_cs_inquiry(hdev, ev->status);
                break;
@@ -3110,27 +3131,38 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cs_le_create_conn(hdev, ev->status);
                break;
 
+       case HCI_OP_LE_READ_REMOTE_FEATURES:
+               hci_cs_le_read_remote_features(hdev, ev->status);
+               break;
+
        case HCI_OP_LE_START_ENC:
                hci_cs_le_start_enc(hdev, ev->status);
                break;
 
        default:
-               BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+               BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
                break;
        }
 
-       if (opcode != HCI_OP_NOP)
+       if (*opcode != HCI_OP_NOP)
                cancel_delayed_work(&hdev->cmd_timer);
 
+       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
+               atomic_set(&hdev->cmd_cnt, 1);
+
+       /* Indicate request completion if the command failed. Also, if
+        * we're not waiting for a special event and we get a success
+        * command status we should try to flag the request as completed
+        * (since for this kind of commands there will not be a command
+        * complete event).
+        */
        if (ev->status ||
-           (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req_event))
-               hci_req_cmd_complete(hdev, opcode, ev->status);
+           (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
+               hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+                                    req_complete_skb);
 
-       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
-               atomic_set(&hdev->cmd_cnt, 1);
-               if (!skb_queue_empty(&hdev->cmd_q))
-                       queue_work(hdev->workqueue, &hdev->cmd_work);
-       }
+       if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+               queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -4514,7 +4546,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        conn->sec_level = BT_SECURITY_LOW;
        conn->handle = __le16_to_cpu(ev->handle);
-       conn->state = BT_CONNECTED;
+       conn->state = BT_CONFIG;
 
        conn->le_conn_interval = le16_to_cpu(ev->interval);
        conn->le_conn_latency = le16_to_cpu(ev->latency);
@@ -4523,7 +4555,33 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_debugfs_create_conn(conn);
        hci_conn_add_sysfs(conn);
 
-       hci_connect_cfm(conn, ev->status);
+       if (!ev->status) {
+               /* The remote features procedure is defined for master
+                * role only. So only in case of an initiated connection
+                * request the remote features.
+                *
+                * If the local controller supports slave-initiated features
+                * exchange, then requesting the remote features in slave
+                * role is possible. Otherwise just transition into the
+                * connected state without requesting the remote features.
+                */
+               if (conn->out ||
+                   (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
+                       struct hci_cp_le_read_remote_features cp;
+
+                       cp.handle = __cpu_to_le16(conn->handle);
+
+                       hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
+                                    sizeof(cp), &cp);
+
+                       hci_conn_hold(conn);
+               } else {
+                       conn->state = BT_CONNECTED;
+                       hci_connect_cfm(conn, ev->status);
+               }
+       } else {
+               hci_connect_cfm(conn, ev->status);
+       }
 
        params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
                                           conn->dst_type);
@@ -4825,6 +4883,48 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 }
 
+static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
+                                           struct sk_buff *skb)
+{
+       struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+       if (conn) {
+               if (!ev->status)
+                       memcpy(conn->features[0], ev->features, 8);
+
+               if (conn->state == BT_CONFIG) {
+                       __u8 status;
+
+                       /* If the local controller supports slave-initiated
+                        * features exchange, but the remote controller does
+                        * not, then it is possible that the error code 0x1a
+                        * for unsupported remote feature gets returned.
+                        *
+                        * In this specific case, allow the connection to
+                        * transition into connected state and mark it as
+                        * successful.
+                        */
+                       if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
+                           !conn->out && ev->status == 0x1a)
+                               status = 0x00;
+                       else
+                               status = ev->status;
+
+                       conn->state = BT_CONNECTED;
+                       hci_connect_cfm(conn, status);
+                       hci_conn_drop(conn);
+               }
+       }
+
+       hci_dev_unlock(hdev);
+}
+
 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_le_ltk_req *ev = (void *) skb->data;
@@ -4998,6 +5098,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_le_adv_report_evt(hdev, skb);
                break;
 
+       case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
+               hci_le_remote_feat_complete_evt(hdev, skb);
+               break;
+
        case HCI_EV_LE_LTK_REQ:
                hci_le_ltk_request_evt(hdev, skb);
                break;
@@ -5031,32 +5135,79 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
        amp_read_loc_assoc_final_data(hdev, hcon);
 }
 
-void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+                                u8 event, struct sk_buff *skb)
 {
-       struct hci_event_hdr *hdr = (void *) skb->data;
-       __u8 event = hdr->evt;
+       struct hci_ev_cmd_complete *ev;
+       struct hci_event_hdr *hdr;
 
-       hci_dev_lock(hdev);
+       if (!skb)
+               return false;
 
-       /* Received events are (currently) only needed when a request is
-        * ongoing so avoid unnecessary memory allocation.
-        */
-       if (hci_req_pending(hdev)) {
-               kfree_skb(hdev->recv_evt);
-               hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
+       if (skb->len < sizeof(*hdr)) {
+               BT_ERR("Too short HCI event");
+               return false;
        }
 
-       hci_dev_unlock(hdev);
-
+       hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);
 
-       if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req_event == event) {
-               struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
-               u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
+       if (event) {
+               if (hdr->evt != event)
+                       return false;
+               return true;
+       }
+
+       if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+               BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
+               return false;
+       }
+
+       if (skb->len < sizeof(*ev)) {
+               BT_ERR("Too short cmd_complete event");
+               return false;
+       }
 
-               hci_req_cmd_complete(hdev, opcode, 0);
+       ev = (void *) skb->data;
+       skb_pull(skb, sizeof(*ev));
+
+       if (opcode != __le16_to_cpu(ev->opcode)) {
+               BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
+                      __le16_to_cpu(ev->opcode));
+               return false;
        }
 
+       return true;
+}
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_event_hdr *hdr = (void *) skb->data;
+       hci_req_complete_t req_complete = NULL;
+       hci_req_complete_skb_t req_complete_skb = NULL;
+       struct sk_buff *orig_skb = NULL;
+       u8 status = 0, event = hdr->evt, req_evt = 0;
+       u16 opcode = HCI_OP_NOP;
+
+       if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
+               struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+               opcode = __le16_to_cpu(cmd_hdr->opcode);
+               hci_req_cmd_complete(hdev, opcode, status, &req_complete,
+                                    &req_complete_skb);
+               req_evt = event;
+       }
+
+       /* If it looks like we might end up having to call
+        * req_complete_skb, store a pristine copy of the skb since the
+        * various handlers may modify the original one through
+        * skb_pull() calls, etc.
+        */
+       if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
+           event == HCI_EV_CMD_COMPLETE)
+               orig_skb = skb_clone(skb, GFP_KERNEL);
+
+       skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
        switch (event) {
        case HCI_EV_INQUIRY_COMPLETE:
                hci_inquiry_complete_evt(hdev, skb);
@@ -5099,11 +5250,13 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                break;
 
        case HCI_EV_CMD_COMPLETE:
-               hci_cmd_complete_evt(hdev, skb);
+               hci_cmd_complete_evt(hdev, skb, &opcode, &status,
+                                    &req_complete, &req_complete_skb);
                break;
 
        case HCI_EV_CMD_STATUS:
-               hci_cmd_status_evt(hdev, skb);
+               hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
+                                  &req_complete_skb);
                break;
 
        case HCI_EV_HARDWARE_ERROR:
@@ -5235,6 +5388,17 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                break;
        }
 
+       if (req_complete) {
+               req_complete(hdev, status, opcode);
+       } else if (req_complete_skb) {
+               if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
+                       kfree_skb(orig_skb);
+                       orig_skb = NULL;
+               }
+               req_complete_skb(hdev, status, opcode, orig_skb);
+       }
+
+       kfree_skb(orig_skb);
        kfree_skb(skb);
        hdev->stat.evt_rx++;
 }
index 55e096d20a0fc61ea2bb8f1ee895e36788a47559..d6025d6e6d59f957c612a1e7ff455f770734eb6a 100644 (file)
@@ -34,7 +34,8 @@ void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
        req->err = 0;
 }
 
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+static int req_run(struct hci_request *req, hci_req_complete_t complete,
+                  hci_req_complete_skb_t complete_skb)
 {
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
@@ -55,7 +56,8 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
                return -ENODATA;
 
        skb = skb_peek_tail(&req->cmd_q);
-       bt_cb(skb)->req_complete = complete;
+       bt_cb(skb)->req.complete = complete;
+       bt_cb(skb)->req.complete_skb = complete_skb;
 
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -66,6 +68,16 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
        return 0;
 }
 
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+       return req_run(req, complete, NULL);
+}
+
+int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
+{
+       return req_run(req, NULL, complete);
+}
+
 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
 {
@@ -116,9 +128,9 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
        }
 
        if (skb_queue_empty(&req->cmd_q))
-               bt_cb(skb)->req_start = 1;
+               bt_cb(skb)->req.start = true;
 
-       bt_cb(skb)->req_event = event;
+       bt_cb(skb)->req.event = event;
 
        skb_queue_tail(&req->cmd_q, skb);
 }
index adf074d33544083ca74f9dd3f22123a84bd8b527..bf6df92f42dbf44be59134349f8e62ca953d9214 100644 (file)
@@ -32,11 +32,14 @@ struct hci_request {
 
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param);
 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+                         hci_req_complete_t *req_complete,
+                         hci_req_complete_skb_t *req_complete_skb);
 
 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param);
index 85a44a7dc1503b8dfb1f62ded862143b96da596f..56f9edbf3d05dc6a2c6ba4f42174b2314d5e920d 100644 (file)
@@ -1164,7 +1164,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
-                       bt_cb(skb)->req_start = 1;
+                       bt_cb(skb)->req.start = true;
 
                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
index 07348e142f16a783b7764d72314830f4b7844330..a05b9dbf14c991dd1a90de4f23877bc02b78e1b5 100644 (file)
@@ -70,10 +70,11 @@ static void hidp_session_terminate(struct hidp_session *s);
 
 static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
 {
+       u32 valid_flags = 0;
        memset(ci, 0, sizeof(*ci));
        bacpy(&ci->bdaddr, &session->bdaddr);
 
-       ci->flags = session->flags;
+       ci->flags = session->flags & valid_flags;
        ci->state = BT_CONNECTED;
 
        if (session->input) {
@@ -907,7 +908,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
        kref_init(&session->ref);
        atomic_set(&session->state, HIDP_SESSION_IDLING);
        init_waitqueue_head(&session->state_queue);
-       session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
+       session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID);
 
        /* connection management */
        bacpy(&session->bdaddr, bdaddr);
@@ -1312,6 +1313,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,
                        struct socket *ctrl_sock,
                        struct socket *intr_sock)
 {
+       u32 valid_flags = 0;
        struct hidp_session *session;
        struct l2cap_conn *conn;
        struct l2cap_chan *chan;
@@ -1321,6 +1323,9 @@ int hidp_connection_add(struct hidp_connadd_req *req,
        if (ret)
                return ret;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        chan = l2cap_pi(ctrl_sock->sk)->chan;
        conn = NULL;
        l2cap_chan_lock(chan);
@@ -1351,13 +1356,17 @@ out_conn:
 
 int hidp_connection_del(struct hidp_conndel_req *req)
 {
+       u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG);
        struct hidp_session *session;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        session = hidp_session_find(&req->bdaddr);
        if (!session)
                return -ENOENT;
 
-       if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
+       if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
                hidp_send_ctrl_message(session,
                                       HIDP_TRANS_HID_CONTROL |
                                         HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
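
Editorial sketch (not part of the patch): the hidp hunks above introduce a per-operation whitelist of valid flag bits and reject any request that sets something outside it. The same idiom in standalone form, with made-up flag names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1U << (n))

#define FLAG_VIRTUAL_UNPLUG     0       /* made-up flag numbers */
#define FLAG_VENDOR_ID          1

static int connection_del(uint32_t flags)
{
        const uint32_t valid_flags = BIT(FLAG_VIRTUAL_UNPLUG);

        /* any bit outside the whitelist makes the request invalid */
        if (flags & ~valid_flags)
                return -EINVAL;

        if (flags & BIT(FLAG_VIRTUAL_UNPLUG))
                printf("virtual cable unplug requested\n");

        return 0;
}

int main(void)
{
        printf("%d\n", connection_del(BIT(FLAG_VIRTUAL_UNPLUG)));      /* 0 */
        printf("%d\n", connection_del(BIT(FLAG_VENDOR_ID)));           /* -EINVAL */
        return 0;
}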
index d69861c89bb55abb7ae52f70d226b77c7a2d8683..dad419782a1280bbff079c4978578f0be54e37a3 100644 (file)
@@ -292,7 +292,7 @@ static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
        struct sk_buff *skb;
 
        skb_queue_walk(head, skb) {
-               if (bt_cb(skb)->control.txseq == seq)
+               if (bt_cb(skb)->l2cap.txseq == seq)
                        return skb;
        }
 
@@ -954,11 +954,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
 {
        if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
                __unpack_extended_control(get_unaligned_le32(skb->data),
-                                         &bt_cb(skb)->control);
+                                         &bt_cb(skb)->l2cap);
                skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
        } else {
                __unpack_enhanced_control(get_unaligned_le16(skb->data),
-                                         &bt_cb(skb)->control);
+                                         &bt_cb(skb)->l2cap);
                skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
        }
 }
@@ -1200,8 +1200,8 @@ static void l2cap_move_setup(struct l2cap_chan *chan)
 
        chan->retry_count = 0;
        skb_queue_walk(&chan->tx_q, skb) {
-               if (bt_cb(skb)->control.retries)
-                       bt_cb(skb)->control.retries = 1;
+               if (bt_cb(skb)->l2cap.retries)
+                       bt_cb(skb)->l2cap.retries = 1;
                else
                        break;
        }
@@ -1846,8 +1846,8 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
 
                skb = skb_dequeue(&chan->tx_q);
 
-               bt_cb(skb)->control.retries = 1;
-               control = &bt_cb(skb)->control;
+               bt_cb(skb)->l2cap.retries = 1;
+               control = &bt_cb(skb)->l2cap;
 
                control->reqseq = 0;
                control->txseq = chan->next_tx_seq;
@@ -1891,8 +1891,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
 
                skb = chan->tx_send_head;
 
-               bt_cb(skb)->control.retries = 1;
-               control = &bt_cb(skb)->control;
+               bt_cb(skb)->l2cap.retries = 1;
+               control = &bt_cb(skb)->l2cap;
 
                if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
                        control->final = 1;
@@ -1963,11 +1963,11 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
                        continue;
                }
 
-               bt_cb(skb)->control.retries++;
-               control = bt_cb(skb)->control;
+               bt_cb(skb)->l2cap.retries++;
+               control = bt_cb(skb)->l2cap;
 
                if (chan->max_tx != 0 &&
-                   bt_cb(skb)->control.retries > chan->max_tx) {
+                   bt_cb(skb)->l2cap.retries > chan->max_tx) {
                        BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
                        l2cap_send_disconn_req(chan, ECONNRESET);
                        l2cap_seq_list_clear(&chan->retrans_list);
@@ -2045,7 +2045,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
 
        if (chan->unacked_frames) {
                skb_queue_walk(&chan->tx_q, skb) {
-                       if (bt_cb(skb)->control.txseq == control->reqseq ||
+                       if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
                            skb == chan->tx_send_head)
                                break;
                }
@@ -2055,7 +2055,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
                                break;
 
                        l2cap_seq_list_append(&chan->retrans_list,
-                                             bt_cb(skb)->control.txseq);
+                                             bt_cb(skb)->l2cap.txseq);
                }
 
                l2cap_ertm_resend(chan);
@@ -2267,8 +2267,8 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
                return ERR_PTR(err);
        }
 
-       bt_cb(skb)->control.fcs = chan->fcs;
-       bt_cb(skb)->control.retries = 0;
+       bt_cb(skb)->l2cap.fcs = chan->fcs;
+       bt_cb(skb)->l2cap.retries = 0;
        return skb;
 }
 
@@ -2321,7 +2321,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
                        return PTR_ERR(skb);
                }
 
-               bt_cb(skb)->control.sar = sar;
+               bt_cb(skb)->l2cap.sar = sar;
                __skb_queue_tail(seg_queue, skb);
 
                len -= pdu_len;
@@ -2856,7 +2856,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
                        continue;
 
                /* Don't send frame to the channel it came from */
-               if (bt_cb(skb)->chan == chan)
+               if (bt_cb(skb)->l2cap.chan == chan)
                        continue;
 
                nskb = skb_clone(skb, GFP_KERNEL);
@@ -5918,7 +5918,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
 
                skb_unlink(skb, &chan->srej_q);
                chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
-               err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
+               err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
                if (err)
                        break;
        }
@@ -5952,7 +5952,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
                return;
        }
 
-       if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+       if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
                BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
                l2cap_send_disconn_req(chan, ECONNRESET);
                return;
@@ -6005,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
        skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
 
        if (chan->max_tx && skb &&
-           bt_cb(skb)->control.retries >= chan->max_tx) {
+           bt_cb(skb)->l2cap.retries >= chan->max_tx) {
                BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
                l2cap_send_disconn_req(chan, ECONNRESET);
                return;
@@ -6565,7 +6565,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
 
 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       struct l2cap_ctrl *control = &bt_cb(skb)->control;
+       struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
        u16 len;
        u8 event;
 
@@ -6864,8 +6864,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
                goto drop;
 
        /* Store remote BD_ADDR and PSM for msg_name */
-       bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
-       bt_cb(skb)->psm = psm;
+       bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
+       bt_cb(skb)->l2cap.psm = psm;
 
        if (!chan->ops->recv(chan, skb)) {
                l2cap_chan_put(chan);
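
Editorial note: the control → l2cap (and req_* → req.*) renames above suggest the skb control block now groups per-protocol members together instead of exposing a flat set of fields. A hedged, standalone sketch of that layout; the struct and field names below are invented and do not reproduce the actual bt_skb_cb definition:

#include <stdio.h>
#include <string.h>

/* per-packet scratch area, analogous in spirit to skb->cb */
struct packet {
        char cb[48];
};

struct l2cap_ctrl_sketch { unsigned int txseq, retries; };
struct req_ctrl_sketch   { int start; unsigned short event; };

struct proto_cb {
        union {
                struct l2cap_ctrl_sketch l2cap;
                struct req_ctrl_sketch req;
        };
};

#define proto_cb(pkt) ((struct proto_cb *)((pkt)->cb))

int main(void)
{
        struct packet pkt;

        memset(&pkt, 0, sizeof(pkt));
        proto_cb(&pkt)->l2cap.txseq = 42;       /* L2CAP users touch .l2cap */
        proto_cb(&pkt)->l2cap.retries = 1;
        printf("txseq %u retries %u\n",
               proto_cb(&pkt)->l2cap.txseq, proto_cb(&pkt)->l2cap.retries);
        return 0;
}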
index 9070720eedc8730ddd6982e709a1c4d5c9bd2b66..a7278f05eafbbda65da7c991820474a95e4e1a3c 100644 (file)
@@ -1330,7 +1330,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
 
        skb->priority = sk->sk_priority;
 
-       bt_cb(skb)->chan = chan;
+       bt_cb(skb)->l2cap.chan = chan;
 
        return skb;
 }
@@ -1444,8 +1444,8 @@ static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
 
        memset(la, 0, sizeof(struct sockaddr_l2));
        la->l2_family = AF_BLUETOOTH;
-       la->l2_psm = bt_cb(skb)->psm;
-       bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+       la->l2_psm = bt_cb(skb)->l2cap.psm;
+       bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr);
 
        *msg_namelen = sizeof(struct sockaddr_l2);
 }
index fb2e764c62113d8f08768e6dd118b081f7be0531..7fd87e7135b52753c0bcefd58cb4a290c57c77ba 100644 (file)
@@ -985,14 +985,27 @@ static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
        /* Instance 0 always manages the "Tx Power" and "Flags" fields */
        flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
 
-       /* For instance 0, assemble the flags from global settings */
-       if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) ||
-           get_connectable(hdev))
+       /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
+        * to the "connectable" instance flag.
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                flags |= MGMT_ADV_FLAG_CONNECTABLE;
 
        return flags;
 }
 
+static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+{
+       /* Ignore instance 0 and other unsupported instances */
+       if (instance != 0x01)
+               return 0;
+
+       /* TODO: Take into account the "appearance" and "local-name" flags here.
+        * These are currently being ignored as they are not supported.
+        */
+       return hdev->adv_instance.scan_rsp_len;
+}
+
 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
 {
        u8 ad_len = 0, flags = 0;
@@ -1030,6 +1043,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
                }
        }
 
+       if (instance) {
+               memcpy(ptr, hdev->adv_instance.adv_data,
+                      hdev->adv_instance.adv_data_len);
+
+               ad_len += hdev->adv_instance.adv_data_len;
+               ptr += hdev->adv_instance.adv_data_len;
+       }
+
        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
@@ -1041,12 +1062,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
                ptr += 3;
        }
 
-       if (instance) {
-               memcpy(ptr, hdev->adv_instance.adv_data,
-                      hdev->adv_instance.adv_data_len);
-               ad_len += hdev->adv_instance.adv_data_len;
-       }
-
        return ad_len;
 }
 
@@ -1242,7 +1257,12 @@ static void enable_advertising(struct hci_request *req)
 
        instance = get_current_adv_instance(hdev);
        flags = get_adv_instance_flags(hdev, instance);
-       connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE);
+
+       /* If the "connectable" instance flag was not set, then choose between
+        * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+        */
+       connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+                     get_connectable(hdev);
 
        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
@@ -1254,7 +1274,14 @@ static void enable_advertising(struct hci_request *req)
        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
-       cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+
+       if (connectable)
+               cp.type = LE_ADV_IND;
+       else if (get_adv_instance_scan_rsp_len(hdev, instance))
+               cp.type = LE_ADV_SCAN_IND;
+       else
+               cp.type = LE_ADV_NONCONN_IND;
+
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;
 
@@ -2088,7 +2115,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 
 no_scan_update:
        /* Update the advertising parameters if necessary */
-       if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+           hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
                enable_advertising(&req);
 
        err = hci_req_run(&req, set_connectable_complete);
@@ -3757,10 +3785,70 @@ failed:
        return err;
 }
 
+static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
+                                        u16 opcode, struct sk_buff *skb)
+{
+       struct mgmt_rp_read_local_oob_data mgmt_rp;
+       size_t rp_size = sizeof(mgmt_rp);
+       struct mgmt_pending_cmd *cmd;
+
+       BT_DBG("%s status %u", hdev->name, status);
+
+       cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
+       if (!cmd)
+               return;
+
+       if (status || !skb) {
+               mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                               status ? mgmt_status(status) : MGMT_STATUS_FAILED);
+               goto remove;
+       }
+
+       memset(&mgmt_rp, 0, sizeof(mgmt_rp));
+
+       if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+               struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+
+               if (skb->len < sizeof(*rp)) {
+                       mgmt_cmd_status(cmd->sk, hdev->id,
+                                       MGMT_OP_READ_LOCAL_OOB_DATA,
+                                       MGMT_STATUS_FAILED);
+                       goto remove;
+               }
+
+               memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
+               memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
+
+               rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
+       } else {
+               struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+
+               if (skb->len < sizeof(*rp)) {
+                       mgmt_cmd_status(cmd->sk, hdev->id,
+                                       MGMT_OP_READ_LOCAL_OOB_DATA,
+                                       MGMT_STATUS_FAILED);
+                       goto remove;
+               }
+
+               memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
+               memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
+
+               memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
+               memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
+       }
+
+       mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                         MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
+
+remove:
+       mgmt_pending_remove(cmd);
+}
+
 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 data_len)
 {
        struct mgmt_pending_cmd *cmd;
+       struct hci_request req;
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -3791,12 +3879,14 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
+       hci_req_init(&req, hdev);
+
        if (bredr_sc_enabled(hdev))
-               err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
-                                  0, NULL);
+               hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
        else
-               err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+               hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
 
+       err = hci_req_run_skb(&req, read_local_oob_data_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -6376,6 +6466,145 @@ static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
        return eir_len;
 }
 
+static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
+                                            u16 opcode, struct sk_buff *skb)
+{
+       const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
+       struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
+       u8 *h192, *r192, *h256, *r256;
+       struct mgmt_pending_cmd *cmd;
+       u16 eir_len;
+       int err;
+
+       BT_DBG("%s status %u", hdev->name, status);
+
+       cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
+       if (!cmd)
+               return;
+
+       mgmt_cp = cmd->param;
+
+       if (status) {
+               status = mgmt_status(status);
+               eir_len = 0;
+
+               h192 = NULL;
+               r192 = NULL;
+               h256 = NULL;
+               r256 = NULL;
+       } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+               struct hci_rp_read_local_oob_data *rp;
+
+               if (skb->len != sizeof(*rp)) {
+                       status = MGMT_STATUS_FAILED;
+                       eir_len = 0;
+               } else {
+                       status = MGMT_STATUS_SUCCESS;
+                       rp = (void *)skb->data;
+
+                       eir_len = 5 + 18 + 18;
+                       h192 = rp->hash;
+                       r192 = rp->rand;
+                       h256 = NULL;
+                       r256 = NULL;
+               }
+       } else {
+               struct hci_rp_read_local_oob_ext_data *rp;
+
+               if (skb->len != sizeof(*rp)) {
+                       status = MGMT_STATUS_FAILED;
+                       eir_len = 0;
+               } else {
+                       status = MGMT_STATUS_SUCCESS;
+                       rp = (void *)skb->data;
+
+                       if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
+                               eir_len = 5 + 18 + 18;
+                               h192 = NULL;
+                               r192 = NULL;
+                       } else {
+                               eir_len = 5 + 18 + 18 + 18 + 18;
+                               h192 = rp->hash192;
+                               r192 = rp->rand192;
+                       }
+
+                       h256 = rp->hash256;
+                       r256 = rp->rand256;
+               }
+       }
+
+       mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
+       if (!mgmt_rp)
+               goto done;
+
+       if (status)
+               goto send_rsp;
+
+       eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
+                                 hdev->dev_class, 3);
+
+       if (h192 && r192) {
+               eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+                                         EIR_SSP_HASH_C192, h192, 16);
+               eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+                                         EIR_SSP_RAND_R192, r192, 16);
+       }
+
+       if (h256 && r256) {
+               eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+                                         EIR_SSP_HASH_C256, h256, 16);
+               eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+                                         EIR_SSP_RAND_R256, r256, 16);
+       }
+
+send_rsp:
+       mgmt_rp->type = mgmt_cp->type;
+       mgmt_rp->eir_len = cpu_to_le16(eir_len);
+
+       err = mgmt_cmd_complete(cmd->sk, hdev->id,
+                               MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
+                               mgmt_rp, sizeof(*mgmt_rp) + eir_len);
+       if (err < 0 || status)
+               goto done;
+
+       hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
+
+       err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
+                                mgmt_rp, sizeof(*mgmt_rp) + eir_len,
+                                HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
+done:
+       kfree(mgmt_rp);
+       mgmt_pending_remove(cmd);
+}
+
+static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
+                                 struct mgmt_cp_read_local_oob_ext_data *cp)
+{
+       struct mgmt_pending_cmd *cmd;
+       struct hci_request req;
+       int err;
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
+                              cp, sizeof(*cp));
+       if (!cmd)
+               return -ENOMEM;
+
+       hci_req_init(&req, hdev);
+
+       if (bredr_sc_enabled(hdev))
+               hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
+       else
+               hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+
+       err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
+       if (err < 0) {
+               mgmt_pending_remove(cmd);
+               return err;
+       }
+
+       return 0;
+}
+
 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
                                   void *data, u16 data_len)
 {
@@ -6388,71 +6617,87 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
 
        BT_DBG("%s", hdev->name);
 
-       if (!hdev_is_powered(hdev))
-               return mgmt_cmd_complete(sk, hdev->id,
-                                        MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
-                                        MGMT_STATUS_NOT_POWERED,
-                                        &cp->type, sizeof(cp->type));
-
-       switch (cp->type) {
-       case BIT(BDADDR_BREDR):
-               status = mgmt_bredr_support(hdev);
-               if (status)
-                       return mgmt_cmd_complete(sk, hdev->id,
-                                                MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
-                                                status, &cp->type,
-                                                sizeof(cp->type));
-               eir_len = 5;
-               break;
-       case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
-               status = mgmt_le_support(hdev);
-               if (status)
-                       return mgmt_cmd_complete(sk, hdev->id,
-                                                MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
-                                                status, &cp->type,
-                                                sizeof(cp->type));
-               eir_len = 9 + 3 + 18 + 18 + 3;
-               break;
-       default:
-               return mgmt_cmd_complete(sk, hdev->id,
-                                        MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
-                                        MGMT_STATUS_INVALID_PARAMS,
-                                        &cp->type, sizeof(cp->type));
+       if (hdev_is_powered(hdev)) {
+               switch (cp->type) {
+               case BIT(BDADDR_BREDR):
+                       status = mgmt_bredr_support(hdev);
+                       if (status)
+                               eir_len = 0;
+                       else
+                               eir_len = 5;
+                       break;
+               case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
+                       status = mgmt_le_support(hdev);
+                       if (status)
+                               eir_len = 0;
+                       else
+                               eir_len = 9 + 3 + 18 + 18 + 3;
+                       break;
+               default:
+                       status = MGMT_STATUS_INVALID_PARAMS;
+                       eir_len = 0;
+                       break;
+               }
+       } else {
+               status = MGMT_STATUS_NOT_POWERED;
+               eir_len = 0;
        }
 
-       hci_dev_lock(hdev);
-
        rp_len = sizeof(*rp) + eir_len;
        rp = kmalloc(rp_len, GFP_ATOMIC);
-       if (!rp) {
-               hci_dev_unlock(hdev);
+       if (!rp)
                return -ENOMEM;
-       }
+
+       if (status)
+               goto complete;
+
+       hci_dev_lock(hdev);
 
        eir_len = 0;
        switch (cp->type) {
        case BIT(BDADDR_BREDR):
-               eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
-                                         hdev->dev_class, 3);
+               if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
+                       err = read_local_ssp_oob_req(hdev, sk, cp);
+                       hci_dev_unlock(hdev);
+                       if (!err)
+                               goto done;
+
+                       status = MGMT_STATUS_FAILED;
+                       goto complete;
+               } else {
+                       eir_len = eir_append_data(rp->eir, eir_len,
+                                                 EIR_CLASS_OF_DEV,
+                                                 hdev->dev_class, 3);
+               }
                break;
        case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
                if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
                    smp_generate_oob(hdev, hash, rand) < 0) {
                        hci_dev_unlock(hdev);
-                       err = mgmt_cmd_complete(sk, hdev->id,
-                                               MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
-                                               MGMT_STATUS_FAILED,
-                                               &cp->type, sizeof(cp->type));
-                       goto done;
+                       status = MGMT_STATUS_FAILED;
+                       goto complete;
                }
 
+               /* This should return the active RPA, but since the RPA
+                * is only programmed on demand, it is really hard to fill
+                * this in at the moment. For now disallow retrieving
+                * local out-of-band data when privacy is in use.
+                *
+                * Returning the identity address will not help here since
+                * pairing happens before the identity resolving key is
+                * known and thus the connection establishment happens
+                * based on the RPA and not the identity address.
+                */
                if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
-                       memcpy(addr, &hdev->rpa, 6);
-                       addr[6] = 0x01;
-               } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
-                          !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
-                          (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
-                           bacmp(&hdev->static_addr, BDADDR_ANY))) {
+                       hci_dev_unlock(hdev);
+                       status = MGMT_STATUS_REJECTED;
+                       goto complete;
+               }
+
+               if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+                  !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+                  (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+                   bacmp(&hdev->static_addr, BDADDR_ANY))) {
                        memcpy(addr, &hdev->static_addr, 6);
                        addr[6] = 0x01;
                } else {
@@ -6491,16 +6736,19 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
                break;
        }
 
-       rp->type = cp->type;
-       rp->eir_len = cpu_to_le16(eir_len);
-
        hci_dev_unlock(hdev);
 
        hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
 
+       status = MGMT_STATUS_SUCCESS;
+
+complete:
+       rp->type = cp->type;
+       rp->eir_len = cpu_to_le16(eir_len);
+
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
-                               MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
-       if (err < 0)
+                               status, rp, sizeof(*rp) + eir_len);
+       if (err < 0 || status)
                goto done;
 
        err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
@@ -7899,43 +8147,6 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
                           cmd ? cmd->sk : NULL);
 }
 
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
-                                      u8 *rand192, u8 *hash256, u8 *rand256,
-                                      u8 status)
-{
-       struct mgmt_pending_cmd *cmd;
-
-       BT_DBG("%s status %u", hdev->name, status);
-
-       cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
-       if (!cmd)
-               return;
-
-       if (status) {
-               mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                               mgmt_status(status));
-       } else {
-               struct mgmt_rp_read_local_oob_data rp;
-               size_t rp_size = sizeof(rp);
-
-               memcpy(rp.hash192, hash192, sizeof(rp.hash192));
-               memcpy(rp.rand192, rand192, sizeof(rp.rand192));
-
-               if (bredr_sc_enabled(hdev) && hash256 && rand256) {
-                       memcpy(rp.hash256, hash256, sizeof(rp.hash256));
-                       memcpy(rp.rand256, rand256, sizeof(rp.rand256));
-               } else {
-                       rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
-               }
-
-               mgmt_cmd_complete(cmd->sk, hdev->id,
-                                 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
-                                 &rp, rp_size);
-       }
-
-       mgmt_pending_remove(cmd);
-}
-
 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
 {
        int i;
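
Editorial sketch (not part of the patch): the OOB handlers above build their replies by repeatedly calling eir_append_data() to add (length, type, value) entries. A minimal standalone version of that append step; the type byte and class-of-device value in main() are only placeholders:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* append one (length, type, value) entry and return the new total length */
static uint16_t eir_append(uint8_t *eir, uint16_t eir_len, uint8_t type,
                           const uint8_t *data, uint8_t data_len)
{
        eir[eir_len++] = data_len + 1;  /* length byte covers type + value */
        eir[eir_len++] = type;
        memcpy(&eir[eir_len], data, data_len);

        return eir_len + data_len;
}

int main(void)
{
        uint8_t eir[64];
        uint8_t dev_class[3] = { 0x0c, 0x02, 0x5a };    /* arbitrary example */
        uint16_t len = 0;

        len = eir_append(eir, len, 0x0d, dev_class, sizeof(dev_class));
        printf("eir_len %u (2 header bytes + 3 value bytes)\n", (unsigned)len);
        return 0;
}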
index 378f4064952cfd0fe954511e76d307a486118ca4..dc688f13e49612cd74decf8853b8c270803942f3 100644 (file)
@@ -21,6 +21,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
+#include <linux/debugfs.h>
+
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
@@ -154,6 +156,21 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
        return 0;
 }
 
+static char test_ecdh_buffer[32];
+
+static ssize_t test_ecdh_read(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer,
+                                      strlen(test_ecdh_buffer));
+}
+
+static const struct file_operations test_ecdh_fops = {
+       .open           = simple_open,
+       .read           = test_ecdh_read,
+       .llseek         = default_llseek,
+};
+
 static int __init test_ecdh(void)
 {
        ktime_t calltime, delta, rettime;
@@ -165,19 +182,19 @@ static int __init test_ecdh(void)
        err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
        if (err) {
                BT_ERR("ECDH sample 1 failed");
-               return err;
+               goto done;
        }
 
        err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
        if (err) {
                BT_ERR("ECDH sample 2 failed");
-               return err;
+               goto done;
        }
 
        err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
        if (err) {
                BT_ERR("ECDH sample 3 failed");
-               return err;
+               goto done;
        }
 
        rettime = ktime_get();
@@ -186,7 +203,17 @@ static int __init test_ecdh(void)
 
        BT_INFO("ECDH test passed in %llu usecs", duration);
 
-       return 0;
+done:
+       if (!err)
+               snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer),
+                        "PASS (%llu usecs)\n", duration);
+       else
+               snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n");
+
+       debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL,
+                           &test_ecdh_fops);
+
+       return err;
 }
 
 #else
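
Editorial sketch (not part of the patch): the selftest hunks above record the result string once ("PASS (...)" or "FAIL") and expose it through a read-only debugfs file backed by simple_read_from_buffer(). A userspace approximation of that read path, with the offset handling written out by hand:

#include <stdio.h>
#include <string.h>

static char result_buf[32];

/* serve the buffer back chunk by chunk, tracking the read offset */
static long result_read(char *dst, size_t count, long *ppos)
{
        size_t len = strlen(result_buf);

        if ((size_t)*ppos >= len)
                return 0;
        if (count > len - (size_t)*ppos)
                count = len - (size_t)*ppos;
        memcpy(dst, result_buf + *ppos, count);
        *ppos += (long)count;

        return (long)count;
}

int main(void)
{
        char out[64] = "";
        long pos = 0;

        /* the selftest fills the buffer exactly once, at init time */
        snprintf(result_buf, sizeof(result_buf), "PASS (%llu usecs)\n", 1234ULL);

        while (result_read(out + pos, 8, &pos) > 0)
                ;
        fputs(out, stdout);
        return 0;
}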
index 1ec3f66b5a74ec1027efa078409cfbcf88e18c73..1ab3dc9c8f99bf425a2a24403cfe6e54ddbbd550 100644 (file)
@@ -3017,7 +3017,7 @@ static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan,
                return ERR_PTR(-ENOMEM);
 
        skb->priority = HCI_PRIO_MAX;
-       bt_cb(skb)->chan = chan;
+       bt_cb(skb)->l2cap.chan = chan;
 
        return skb;
 }
@@ -3549,6 +3549,21 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
+static char test_smp_buffer[32];
+
+static ssize_t test_smp_read(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer,
+                                      strlen(test_smp_buffer));
+}
+
+static const struct file_operations test_smp_fops = {
+       .open           = simple_open,
+       .read           = test_smp_read,
+       .llseek         = default_llseek,
+};
+
 static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
                                struct crypto_hash *tfm_cmac)
 {
@@ -3561,49 +3576,49 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
        err = test_ah(tfm_aes);
        if (err) {
                BT_ERR("smp_ah test failed");
-               return err;
+               goto done;
        }
 
        err = test_c1(tfm_aes);
        if (err) {
                BT_ERR("smp_c1 test failed");
-               return err;
+               goto done;
        }
 
        err = test_s1(tfm_aes);
        if (err) {
                BT_ERR("smp_s1 test failed");
-               return err;
+               goto done;
        }
 
        err = test_f4(tfm_cmac);
        if (err) {
                BT_ERR("smp_f4 test failed");
-               return err;
+               goto done;
        }
 
        err = test_f5(tfm_cmac);
        if (err) {
                BT_ERR("smp_f5 test failed");
-               return err;
+               goto done;
        }
 
        err = test_f6(tfm_cmac);
        if (err) {
                BT_ERR("smp_f6 test failed");
-               return err;
+               goto done;
        }
 
        err = test_g2(tfm_cmac);
        if (err) {
                BT_ERR("smp_g2 test failed");
-               return err;
+               goto done;
        }
 
        err = test_h6(tfm_cmac);
        if (err) {
                BT_ERR("smp_h6 test failed");
-               return err;
+               goto done;
        }
 
        rettime = ktime_get();
@@ -3612,7 +3627,17 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
 
        BT_INFO("SMP test passed in %llu usecs", duration);
 
-       return 0;
+done:
+       if (!err)
+               snprintf(test_smp_buffer, sizeof(test_smp_buffer),
+                        "PASS (%llu usecs)\n", duration);
+       else
+               snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n");
+
+       debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL,
+                           &test_smp_fops);
+
+       return err;
 }
 
 int __init bt_selftest_smp(void)
index 3304a544233174a3d1c7474cb19fffc05483be78..e97572b5d2ccfbce420009f60b30f5439fd1c571 100644 (file)
@@ -35,7 +35,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
                p->state == BR_STATE_FORWARDING;
 }
 
-int br_dev_queue_push_xmit(struct sk_buff *skb)
+int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
 {
        if (!is_skb_forwardable(skb->dev, skb)) {
                kfree_skb(skb);
@@ -49,9 +49,10 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
 
-int br_forward_finish(struct sk_buff *skb)
+int br_forward_finish(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb,
+                      NULL, skb->dev,
                       br_dev_queue_push_xmit);
 
 }
@@ -75,7 +76,8 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
                return;
        }
 
-       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
+               NULL, skb->dev,
                br_forward_finish);
 }
 
@@ -96,7 +98,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
        skb->dev = to->dev;
        skb_forward_csum(skb);
 
-       NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+       NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb,
+               indev, skb->dev,
                br_forward_finish);
 }
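
Editorial sketch (not part of the patch): the bridge hunks above change the NF_HOOK continuation callbacks (okfn) to take the originating struct sock * alongside the skb, and every hook invocation now passes it through. A stripped-down illustration of threading that extra argument, with invented types:

#include <stdio.h>

struct sock   { int id; };
struct packet { const char *name; };

/* continuation callback: now takes the originating socket as well */
typedef int (*okfn_t)(struct sock *sk, struct packet *pkt);

static int hook_run(struct sock *sk, struct packet *pkt, okfn_t okfn)
{
        /* a real hook would run filter rules first; on accept, continue */
        return okfn(sk, pkt);
}

static int queue_xmit(struct sock *sk, struct packet *pkt)
{
        printf("xmit %s (sk=%p)\n", pkt->name, (void *)sk);
        return 0;
}

int main(void)
{
        struct packet pkt = { "frame0" };

        /* locally generated or forwarded traffic may have no socket attached */
        return hook_run(NULL, &pkt, queue_xmit);
}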
 
index 052c5ebbc9472c833df81e28a4895b96ba3f389c..f921a5dce22dadf465b79dd93be733707d5d5d0d 100644 (file)
@@ -55,8 +55,9 @@ static int br_pass_frame_up(struct sk_buff *skb)
        if (!skb)
                return NET_RX_DROP;
 
-       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
-                      netif_receive_skb);
+       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
+                      indev, NULL,
+                      netif_receive_skb_sk);
 }
 
 static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
@@ -119,7 +120,7 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
 }
 
 /* note: already called with rcu_read_lock */
-int br_handle_frame_finish(struct sk_buff *skb)
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb)
 {
        const unsigned char *dest = eth_hdr(skb)->h_dest;
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -207,7 +208,7 @@ drop:
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);
 
 /* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct sk_buff *skb)
+static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb)
 {
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        u16 vid = 0;
@@ -277,8 +278,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                }
 
                /* Deliver packet to local host only */
-               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
-                           NULL, br_handle_local_finish)) {
+               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
+                           skb->dev, NULL, br_handle_local_finish)) {
                        return RX_HANDLER_CONSUMED; /* consumed by filter */
                } else {
                        *pskb = skb;
@@ -302,7 +303,8 @@ forward:
                if (ether_addr_equal(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;
 
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+               NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb,
+                       skb->dev, NULL,
                        br_handle_frame_finish);
                break;
        default:
index c465876c7861814ba545cf83783c7ba11bbd91eb..4b6722f8f1790811d2ef4b9b1ae8839628b745c8 100644 (file)
@@ -814,7 +814,8 @@ static void __br_multicast_send_query(struct net_bridge *br,
 
        if (port) {
                skb->dev = port->dev;
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
+                       NULL, skb->dev,
                        br_dev_queue_push_xmit);
        } else {
                br_multicast_select_own_querier(br, ip, skb);
index f3884a1b942f7ae788dc19ff6131f44ea0c809eb..ab55e2472beb0e44dece07e327f2e0eb8d3f502c 100644 (file)
@@ -111,6 +111,24 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
         pppoe_proto(skb) == htons(PPP_IPV6) && \
         brnf_filter_pppoe_tagged)
 
+/* largest possible L2 header, see br_nf_dev_queue_xmit() */
+#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+struct brnf_frag_data {
+       char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
+       u8 encap_size;
+       u8 size;
+};
+
+static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
+#endif
+
+static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
+{
+       return skb->nf_bridge;
+}
+
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
        struct net_bridge_port *port;
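
Editorial sketch (not part of the patch): brnf_frag_data_storage above is per-CPU scratch space used to stash the L2 header before IPv4 refragmentation so br_nf_push_frag_xmit() can paste it back onto each fragment. A single-threaded userspace sketch of that save/restore round trip, where a plain static replaces the per-CPU variable:

#include <stdio.h>
#include <string.h>

#define MAX_HDR 22      /* Ethernet + PPPoE session header */

struct frag_data {
        char mac[MAX_HDR];
        unsigned char size;
};

/* a plain static stands in for the kernel's per-CPU storage */
static struct frag_data frag_storage;

/* output callback: put the saved header back in front of each fragment */
static int push_frag_xmit(char *frag, size_t room)
{
        size_t payload = strlen(frag) + 1;

        if (room < frag_storage.size + payload)
                return -1;
        memmove(frag + frag_storage.size, frag, payload);
        memcpy(frag, frag_storage.mac, frag_storage.size);
        printf("xmit: %s\n", frag);
        return 0;
}

int main(void)
{
        char frame[64] = "ETHHDR|payload-part-1";
        char frag1[64] = "payload-part-1";
        char frag2[64] = "payload-part-2";

        /* save the L2 header once, before fragmentation strips it */
        frag_storage.size = 7;          /* strlen("ETHHDR|") */
        memcpy(frag_storage.mac, frame, frag_storage.size);

        push_frag_xmit(frag1, sizeof(frag1));
        push_frag_xmit(frag2, sizeof(frag2));
        return 0;
}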
@@ -189,14 +207,6 @@ static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
        skb->network_header += len;
 }
 
-static inline void nf_bridge_save_header(struct sk_buff *skb)
-{
-       int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-
-       skb_copy_from_linear_data_offset(skb, -header_size,
-                                        skb->nf_bridge->data, header_size);
-}
-
 /* When handing a packet over to the IP layer
  * check whether we have a skb that is in the
  * expected format
@@ -252,23 +262,29 @@ drop:
 
 static void nf_bridge_update_protocol(struct sk_buff *skb)
 {
-       if (skb->nf_bridge->mask & BRNF_8021Q)
+       switch (skb->nf_bridge->orig_proto) {
+       case BRNF_PROTO_8021Q:
                skb->protocol = htons(ETH_P_8021Q);
-       else if (skb->nf_bridge->mask & BRNF_PPPoE)
+               break;
+       case BRNF_PROTO_PPPOE:
                skb->protocol = htons(ETH_P_PPP_SES);
+               break;
+       case BRNF_PROTO_UNCHANGED:
+               break;
+       }
 }
 
 /* PF_BRIDGE/PRE_ROUTING *********************************************/
 /* Undo the changes made for ip6tables PREROUTING and continue the
  * bridge PRE_ROUTING hook. */
-static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
+static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct rtable *rt;
 
-       if (nf_bridge->mask & BRNF_PKT_TYPE) {
+       if (nf_bridge->pkt_otherhost) {
                skb->pkt_type = PACKET_OTHERHOST;
-               nf_bridge->mask ^= BRNF_PKT_TYPE;
+               nf_bridge->pkt_otherhost = false;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 
@@ -282,7 +298,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
-       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+                      skb->dev, NULL,
                       br_handle_frame_finish, 1);
 
        return 0;
@@ -293,9 +310,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
  * don't, we use the neighbour framework to find out. In both cases, we make
  * sure that br_handle_frame_finish() is called afterwards.
  */
-static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
+static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct neighbour *neigh;
        struct dst_entry *dst;
 
@@ -305,12 +321,13 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
        dst = skb_dst(skb);
        neigh = dst_neigh_lookup_skb(dst, skb);
        if (neigh) {
+               struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
                int ret;
 
                if (neigh->hh.hh_len) {
                        neigh_hh_bridge(&neigh->hh, skb);
                        skb->dev = nf_bridge->physindev;
-                       ret = br_handle_frame_finish(skb);
+                       ret = br_handle_frame_finish(sk, skb);
                } else {
                        /* the neighbour function below overwrites the complete
                         * MAC header, so we save the Ethernet source address and
@@ -318,7 +335,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
                         */
                        skb_copy_from_linear_data_offset(skb,
                                                         -(ETH_HLEN-ETH_ALEN),
-                                                        skb->nf_bridge->data,
+                                                        nf_bridge->neigh_header,
                                                         ETH_HLEN-ETH_ALEN);
                        /* tell br_dev_xmit to continue with forwarding */
                        nf_bridge->mask |= BRNF_BRIDGED_DNAT;
@@ -387,11 +404,11 @@ static bool dnat_took_place(const struct sk_buff *skb)
  * device, we proceed as if ip_route_input() succeeded. If it differs from the
  * logical bridge port or if ip_route_output_key() fails we drop the packet.
  */
-static int br_nf_pre_routing_finish(struct sk_buff *skb)
+static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        struct iphdr *iph = ip_hdr(skb);
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct rtable *rt;
        int err;
        int frag_max_size;
@@ -399,9 +416,9 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
        frag_max_size = IPCB(skb)->frag_max_size;
        BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
 
-       if (nf_bridge->mask & BRNF_PKT_TYPE) {
+       if (nf_bridge->pkt_otherhost) {
                skb->pkt_type = PACKET_OTHERHOST;
-               nf_bridge->mask ^= BRNF_PKT_TYPE;
+               nf_bridge->pkt_otherhost = false;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
        if (dnat_took_place(skb)) {
@@ -440,7 +457,7 @@ bridged_dnat:
                                nf_bridge_push_encap_header(skb);
                                NF_HOOK_THRESH(NFPROTO_BRIDGE,
                                               NF_BR_PRE_ROUTING,
-                                              skb, skb->dev, NULL,
+                                              sk, skb, skb->dev, NULL,
                                               br_nf_pre_routing_finish_bridge,
                                               1);
                                return 0;
@@ -460,7 +477,8 @@ bridged_dnat:
        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
-       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+                      skb->dev, NULL,
                       br_handle_frame_finish, 1);
 
        return 0;
@@ -483,20 +501,21 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
 /* Some common code for IPv4/IPv6 */
 static struct net_device *setup_pre_routing(struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
-               nf_bridge->mask |= BRNF_PKT_TYPE;
+               nf_bridge->pkt_otherhost = true;
        }
 
        nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
        nf_bridge->physindev = skb->dev;
        skb->dev = brnf_get_logical_dev(skb, skb->dev);
+
        if (skb->protocol == htons(ETH_P_8021Q))
-               nf_bridge->mask |= BRNF_8021Q;
+               nf_bridge->orig_proto = BRNF_PROTO_8021Q;
        else if (skb->protocol == htons(ETH_P_PPP_SES))
-               nf_bridge->mask |= BRNF_PPPoE;
+               nf_bridge->orig_proto = BRNF_PROTO_PPPOE;
 
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
@@ -562,9 +581,7 @@ bad:
  * to ip6tables, which doesn't support NAT, so things are fairly simple. */
 static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
-                                          const struct net_device *in,
-                                          const struct net_device *out,
-                                          int (*okfn)(struct sk_buff *))
+                                          const struct nf_hook_state *state)
 {
        const struct ipv6hdr *hdr;
        u32 pkt_len;
@@ -598,7 +615,8 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
                return NF_DROP;
 
        skb->protocol = htons(ETH_P_IPV6);
-       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb,
+               skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);
 
        return NF_STOLEN;
@@ -612,9 +630,7 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
  * address to be able to detect DNAT afterwards. */
 static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct net_bridge_port *p;
        struct net_bridge *br;
@@ -623,7 +639,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
        if (unlikely(!pskb_may_pull(skb, len)))
                return NF_DROP;
 
-       p = br_port_get_rcu(in);
+       p = br_port_get_rcu(state->in);
        if (p == NULL)
                return NF_DROP;
        br = p->br;
@@ -633,7 +649,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                        return NF_ACCEPT;
 
                nf_bridge_pull_encap_header_rcsum(skb);
-               return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
+               return br_nf_pre_routing_ipv6(ops, skb, state);
        }
 
        if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -655,7 +671,8 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
 
        skb->protocol = htons(ETH_P_IP);
 
-       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
+               skb->dev, NULL,
                br_nf_pre_routing_finish);
 
        return NF_STOLEN;
@@ -671,25 +688,30 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
  * prevent this from happening. */
 static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
-                                  const struct net_device *in,
-                                  const struct net_device *out,
-                                  int (*okfn)(struct sk_buff *))
+                                  const struct nf_hook_state *state)
 {
        br_drop_fake_rtable(skb);
        return NF_ACCEPT;
 }
 
 /* PF_BRIDGE/FORWARD *************************************************/
-static int br_nf_forward_finish(struct sk_buff *skb)
+static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct net_device *in;
 
        if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
+               int frag_max_size;
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       frag_max_size = IPCB(skb)->frag_max_size;
+                       BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
+               }
+
                in = nf_bridge->physindev;
-               if (nf_bridge->mask & BRNF_PKT_TYPE) {
+               if (nf_bridge->pkt_otherhost) {
                        skb->pkt_type = PACKET_OTHERHOST;
-                       nf_bridge->mask ^= BRNF_PKT_TYPE;
+                       nf_bridge->pkt_otherhost = false;
                }
                nf_bridge_update_protocol(skb);
        } else {
@@ -697,8 +719,8 @@ static int br_nf_forward_finish(struct sk_buff *skb)
        }
        nf_bridge_push_encap_header(skb);
 
-       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
-                      skb->dev, br_forward_finish, 1);
+       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, sk, skb,
+                      in, skb->dev, br_forward_finish, 1);
        return 0;
 }
 
@@ -710,9 +732,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
  * bridge ports. */
 static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
        struct nf_bridge_info *nf_bridge;
        struct net_device *parent;
@@ -726,7 +746,11 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
        if (!nf_bridge_unshare(skb))
                return NF_DROP;
 
-       parent = bridge_parent(out);
+       nf_bridge = nf_bridge_info_get(skb);
+       if (!nf_bridge)
+               return NF_DROP;
+
+       parent = bridge_parent(state->out);
        if (!parent)
                return NF_DROP;
 
@@ -739,14 +763,19 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
 
        nf_bridge_pull_encap_header(skb);
 
-       nf_bridge = skb->nf_bridge;
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
-               nf_bridge->mask |= BRNF_PKT_TYPE;
+               nf_bridge->pkt_otherhost = true;
        }
 
-       if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
-               return NF_DROP;
+       if (pf == NFPROTO_IPV4) {
+               int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
+
+               if (br_parse_ip_options(skb))
+                       return NF_DROP;
+
+               IPCB(skb)->frag_max_size = frag_max;
+       }
 
        nf_bridge->physoutdev = skb->dev;
        if (pf == NFPROTO_IPV4)
@@ -754,23 +783,22 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
        else
                skb->protocol = htons(ETH_P_IPV6);
 
-       NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
-               br_nf_forward_finish);
+       NF_HOOK(pf, NF_INET_FORWARD, NULL, skb,
+               brnf_get_logical_dev(skb, state->in),
+               parent, br_nf_forward_finish);
 
        return NF_STOLEN;
 }
 
 static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct net_bridge_port *p;
        struct net_bridge *br;
        struct net_device **d = (struct net_device **)(skb->cb);
 
-       p = br_port_get_rcu(out);
+       p = br_port_get_rcu(state->out);
        if (p == NULL)
                return NF_ACCEPT;
        br = p->br;
@@ -789,81 +817,84 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
                        nf_bridge_push_encap_header(skb);
                return NF_ACCEPT;
        }
-       *d = (struct net_device *)in;
-       NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
-               (struct net_device *)out, br_nf_forward_finish);
+       *d = state->in;
+       NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->sk, skb,
+               state->in, state->out, br_nf_forward_finish);
 
        return NF_STOLEN;
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
-static bool nf_bridge_copy_header(struct sk_buff *skb)
+static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
 {
+       struct brnf_frag_data *data;
        int err;
-       unsigned int header_size;
-
-       nf_bridge_update_protocol(skb);
-       header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-       err = skb_cow_head(skb, header_size);
-       if (err)
-               return false;
 
-       skb_copy_to_linear_data_offset(skb, -header_size,
-                                      skb->nf_bridge->data, header_size);
-       __skb_push(skb, nf_bridge_encap_header_len(skb));
-       return true;
-}
+       data = this_cpu_ptr(&brnf_frag_data_storage);
+       err = skb_cow_head(skb, data->size);
 
-static int br_nf_push_frag_xmit(struct sk_buff *skb)
-{
-       if (!nf_bridge_copy_header(skb)) {
+       if (err) {
                kfree_skb(skb);
                return 0;
        }
 
-       return br_dev_queue_push_xmit(skb);
+       skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
+       __skb_push(skb, data->encap_size);
+
+       return br_dev_queue_push_xmit(sk, skb);
 }
 
-static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 {
        int ret;
        int frag_max_size;
        unsigned int mtu_reserved;
 
        if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
-               return br_dev_queue_push_xmit(skb);
+               return br_dev_queue_push_xmit(sk, skb);
 
        mtu_reserved = nf_bridge_mtu_reduction(skb);
        /* This is wrong! We should preserve the original fragment
         * boundaries by preserving frag_list rather than refragmenting.
         */
        if (skb->len + mtu_reserved > skb->dev->mtu) {
+               struct brnf_frag_data *data;
+
                frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
                if (br_parse_ip_options(skb))
                        /* Drop invalid packet */
                        return NF_DROP;
                IPCB(skb)->frag_max_size = frag_max_size;
-               ret = ip_fragment(skb, br_nf_push_frag_xmit);
-       } else
-               ret = br_dev_queue_push_xmit(skb);
+
+               nf_bridge_update_protocol(skb);
+
+               data = this_cpu_ptr(&brnf_frag_data_storage);
+               data->encap_size = nf_bridge_encap_header_len(skb);
+               data->size = ETH_HLEN + data->encap_size;
+
+               skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+                                                data->size);
+
+               ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
+       } else {
+               ret = br_dev_queue_push_xmit(sk, skb);
+       }
 
        return ret;
 }
 #else
-static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 {
-        return br_dev_queue_push_xmit(skb);
+        return br_dev_queue_push_xmit(sk, skb);
 }
 #endif
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
+                                      const struct nf_hook_state *state)
 {
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct net_device *realoutdev = bridge_parent(skb->dev);
        u_int8_t pf;
 
@@ -889,17 +920,17 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
         * about the value of skb->pkt_type. */
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
-               nf_bridge->mask |= BRNF_PKT_TYPE;
+               nf_bridge->pkt_otherhost = true;
        }
 
        nf_bridge_pull_encap_header(skb);
-       nf_bridge_save_header(skb);
        if (pf == NFPROTO_IPV4)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);
 
-       NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
+       NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb,
+               NULL, realoutdev,
                br_nf_dev_queue_xmit);
 
        return NF_STOLEN;
@@ -910,9 +941,7 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
  * for the second time. */
 static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
-                                  const struct net_device *in,
-                                  const struct net_device *out,
-                                  int (*okfn)(struct sk_buff *))
+                                  const struct nf_hook_state *state)
 {
        if (skb->nf_bridge &&
            !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
@@ -933,15 +962,18 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
  */
 static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
        skb_pull(skb, ETH_HLEN);
        nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
 
-       skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
-                                      skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
+       BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
+
+       skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
+                                      nf_bridge->neigh_header,
+                                      ETH_HLEN - ETH_ALEN);
        skb->dev = nf_bridge->physindev;
-       br_handle_frame_finish(skb);
+       br_handle_frame_finish(NULL, skb);
 }
 
 static int br_nf_dev_xmit(struct sk_buff *skb)
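
The br_netfilter conversion above is the pattern repeated throughout the rest of this
series: hook functions drop their separate in/out/okfn parameters in favour of the new
const struct nf_hook_state *state, NF_HOOK() gains a struct sock * that is threaded
through to the okfn, and the okfn itself now takes (sk, skb). A minimal sketch of a hook
written against the post-conversion API (my_hook/my_finish are illustrative names only,
not part of this series):

	static int my_finish(struct sock *sk, struct sk_buff *skb)
	{
		/* okfn now receives the socket that NF_HOOK() was given */
		return 0;
	}

	static unsigned int my_hook(const struct nf_hook_ops *ops,
				    struct sk_buff *skb,
				    const struct nf_hook_state *state)
	{
		/* in/out/sk are read from state instead of separate arguments */
		if (!state->in || !state->out)
			return NF_DROP;

		NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, state->sk, skb,
			state->in, state->out, my_finish);
		return NF_STOLEN;
	}
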
index e1115a224a9528f8d6884aedcdc846e5ed013183..0e4ddb81610d90ff51a45835424cef547bba73bf 100644 (file)
@@ -305,8 +305,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
            nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
            (dev->addr_len &&
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
                goto nla_put_failure;
 
        if (event == RTM_NEWLINK && port) {
index b46fa0c5b8ece865017e23b29e18047f239edbf5..6ca0251cb478bf3147501b325a381a4081dd5149 100644 (file)
@@ -410,10 +410,10 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 
 /* br_forward.c */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
-int br_dev_queue_push_xmit(struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb);
 void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, struct sk_buff *skb0);
-int br_forward_finish(struct sk_buff *skb);
+int br_forward_finish(struct sock *sk, struct sk_buff *skb);
 void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
 void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
                      struct sk_buff *skb2, bool unicast);
@@ -431,7 +431,7 @@ void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
 void br_manage_promisc(struct net_bridge *br);
 
 /* br_input.c */
-int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
 static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
index bdb459d21ad8e2d5191023c5b096de39b6a78c90..534fc4cd263ef21dd517402efde9585e6f720f83 100644 (file)
@@ -54,8 +54,9 @@ static void br_send_bpdu(struct net_bridge_port *p,
 
        skb_reset_mac_header(skb);
 
-       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
-               dev_queue_xmit);
+       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
+               NULL, skb->dev,
+               dev_queue_xmit_sk);
 }
 
 static inline void br_set_ticks(unsigned char *dest, int j)
index ce205aabf9c5333e2ea5286ef93a4d5a1d977cd9..8a3f63b2e8073d8081df5fbaac3bf63c348c0447 100644 (file)
@@ -58,20 +58,18 @@ static const struct ebt_table frame_filter = {
 
 static unsigned int
 ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-           const struct net_device *in, const struct net_device *out,
-           int (*okfn)(struct sk_buff *))
+           const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(in)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->in)->xt.frame_filter);
 }
 
 static unsigned int
 ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-            const struct net_device *in, const struct net_device *out,
-            int (*okfn)(struct sk_buff *))
+            const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(out)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->out)->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
index a0ac2984fb6c1e1864efdb4bb05150acbf57ba16..c5ef5b1ab6786814830983d76ef46c6fd0051f51 100644 (file)
@@ -58,20 +58,18 @@ static struct ebt_table frame_nat = {
 
 static unsigned int
 ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-          const struct net_device *in, const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
+          const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(in)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->in)->xt.frame_nat);
 }
 
 static unsigned int
 ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-           const struct net_device *in, const struct net_device *out,
-           int (*okfn)(struct sk_buff *))
+           const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(out)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->out)->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
index 19473a9371b8a65ed6b1e8727a5c2b321862886b..a343e62442b1304eca4e23abeb2a3df92283552e 100644 (file)
@@ -67,47 +67,43 @@ EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
 static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
                                               const struct nf_hook_ops *ops,
                                               struct sk_buff *skb,
-                                              const struct net_device *in,
-                                              const struct net_device *out)
+                                              const struct nf_hook_state *state)
 {
        if (nft_bridge_iphdr_validate(skb))
-               nft_set_pktinfo_ipv4(pkt, ops, skb, in, out);
+               nft_set_pktinfo_ipv4(pkt, ops, skb, state);
        else
-               nft_set_pktinfo(pkt, ops, skb, in, out);
+               nft_set_pktinfo(pkt, ops, skb, state);
 }
 
 static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-                                             const struct nf_hook_ops *ops,
-                                             struct sk_buff *skb,
-                                             const struct net_device *in,
-                                             const struct net_device *out)
+                                              const struct nf_hook_ops *ops,
+                                              struct sk_buff *skb,
+                                              const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
        if (nft_bridge_ip6hdr_validate(skb) &&
-           nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0)
+           nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0)
                return;
 #endif
-       nft_set_pktinfo(pkt, ops, skb, in, out);
+       nft_set_pktinfo(pkt, ops, skb, state);
 }
 
 static unsigned int
 nft_do_chain_bridge(const struct nf_hook_ops *ops,
                    struct sk_buff *skb,
-                   const struct net_device *in,
-                   const struct net_device *out,
-                   int (*okfn)(struct sk_buff *))
+                   const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
        switch (eth_hdr(skb)->h_proto) {
        case htons(ETH_P_IP):
-               nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+               nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state);
                break;
        case htons(ETH_P_IPV6):
-               nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+               nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state);
                break;
        default:
-               nft_set_pktinfo(&pkt, ops, skb, in, out);
+               nft_set_pktinfo(&pkt, ops, skb, state);
                break;
        }
 
index 54a2fdf0f4574a4db6ba23193a511ffcf09293da..ae8141f409d9dc24d60d603133cc463e2564f895 100644 (file)
@@ -371,6 +371,8 @@ static int nft_reject_bridge_dump(struct sk_buff *skb,
                if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
                        goto nla_put_failure;
                break;
+       default:
+               break;
        }
 
        return 0;
index 63ffdb0f3a23fa6bea6b9cb65bd7a63ec0953d84..31b9748cbb4ec6c1caa9ddc9e3a80609077a81eb 100644 (file)
@@ -74,6 +74,12 @@ MODULE_ALIAS("can-proto-1");
  * storing the single filter in dfilter, to avoid using dynamic memory.
  */
 
+struct uniqframe {
+       ktime_t tstamp;
+       const struct sk_buff *skb;
+       unsigned int join_rx_count;
+};
+
 struct raw_sock {
        struct sock sk;
        int bound;
@@ -82,10 +88,12 @@ struct raw_sock {
        int loopback;
        int recv_own_msgs;
        int fd_frames;
+       int join_filters;
        int count;                 /* number of active filters */
        struct can_filter dfilter; /* default/single filter */
        struct can_filter *filter; /* pointer to filter(s) */
        can_err_mask_t err_mask;
+       struct uniqframe __percpu *uniq;
 };
 
 /*
@@ -123,6 +131,26 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
        if (!ro->fd_frames && oskb->len != CAN_MTU)
                return;
 
+       /* eliminate multiple filter matches for the same skb */
+       if (this_cpu_ptr(ro->uniq)->skb == oskb &&
+           ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+               if (ro->join_filters) {
+                       this_cpu_inc(ro->uniq->join_rx_count);
+                       /* drop frame until all enabled filters matched */
+                       if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
+                               return;
+               } else {
+                       return;
+               }
+       } else {
+               this_cpu_ptr(ro->uniq)->skb = oskb;
+               this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+               this_cpu_ptr(ro->uniq)->join_rx_count = 1;
+               /* drop first frame to check all enabled filters? */
+               if (ro->join_filters && ro->count > 1)
+                       return;
+       }
+
        /* clone the given skb to be able to enqueue it into the rcv queue */
        skb = skb_clone(oskb, GFP_ATOMIC);
        if (!skb)
@@ -296,6 +324,12 @@ static int raw_init(struct sock *sk)
        ro->loopback         = 1;
        ro->recv_own_msgs    = 0;
        ro->fd_frames        = 0;
+       ro->join_filters     = 0;
+
+       /* alloc_percpu provides zero'ed memory */
+       ro->uniq = alloc_percpu(struct uniqframe);
+       if (unlikely(!ro->uniq))
+               return -ENOMEM;
 
        /* set notifier */
        ro->notifier.notifier_call = raw_notifier;
@@ -339,6 +373,7 @@ static int raw_release(struct socket *sock)
        ro->ifindex = 0;
        ro->bound   = 0;
        ro->count   = 0;
+       free_percpu(ro->uniq);
 
        sock_orphan(sk);
        sock->sk = NULL;
@@ -583,6 +618,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
                break;
 
+       case CAN_RAW_JOIN_FILTERS:
+               if (optlen != sizeof(ro->join_filters))
+                       return -EINVAL;
+
+               if (copy_from_user(&ro->join_filters, optval, optlen))
+                       return -EFAULT;
+
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
@@ -647,6 +691,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
                val = &ro->fd_frames;
                break;
 
+       case CAN_RAW_JOIN_FILTERS:
+               if (len > sizeof(int))
+                       len = sizeof(int);
+               val = &ro->join_filters;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
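
CAN_RAW_JOIN_FILTERS, together with the per-CPU uniqframe bookkeeping added above, flips
the filter semantics from "deliver on any matching filter" to "deliver only when every
configured filter matched the frame". A minimal user-space sketch of enabling it
(interface bind and error handling omitted; assumes headers that already carry this
patch):

	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	struct can_filter flt[2] = {
		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },	/* must match this ...   */
		{ .can_id = 0x200, .can_mask = 0x700 },		/* ... and also this one */
	};
	int join = 1;

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &flt, sizeof(flt));
	/* frames reach this socket only if *all* filters above matched */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS, &join, sizeof(join));
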
index 65492b0354c0cb66f7897dc4af73971977336b10..b2775f06c7102a00afbd170daae05468d660b5b1 100644 (file)
@@ -659,6 +659,27 @@ __setup("netdev=", netdev_boot_setup);
 
 *******************************************************************************/
 
+/**
+ *     dev_get_iflink  - get 'iflink' value of an interface
+ *     @dev: targeted interface
+ *
+ *     Indicates the ifindex the interface is linked to.
+ *     Physical interfaces have the same 'ifindex' and 'iflink' values.
+ */
+
+int dev_get_iflink(const struct net_device *dev)
+{
+       if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
+               return dev->netdev_ops->ndo_get_iflink(dev);
+
+       /* If dev->rtnl_link_ops is set, it's a virtual interface. */
+       if (dev->rtnl_link_ops)
+               return 0;
+
+       return dev->ifindex;
+}
+EXPORT_SYMBOL(dev_get_iflink);
+
 /**
  *     __dev_get_by_name       - find a device by its name
  *     @net: the applicable net namespace
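
With dev->iflink on its way out (see the register_netdevice() and
dev_change_net_namespace() hunks later in this file), stacked and tunnel drivers report
their lower device through the new ndo_get_iflink operation instead of writing the field
directly; dev_get_iflink() falls back to 0 for virtual devices without the op and to
ifindex for physical ones. A hedged sketch of the driver-side conversion (my_tunnel_*
are made-up names; the dsa slave hunk further down does the same thing for real):

	static int my_tunnel_get_iflink(const struct net_device *dev)
	{
		struct my_tunnel_priv *priv = netdev_priv(dev);

		/* report the underlying device instead of caching it in dev->iflink */
		return priv->lower_dev ? priv->lower_dev->ifindex : 0;
	}

	static const struct net_device_ops my_tunnel_netdev_ops = {
		.ndo_start_xmit	= my_tunnel_xmit,
		.ndo_get_iflink	= my_tunnel_get_iflink,
	};
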
@@ -2849,14 +2870,16 @@ static void skb_update_prio(struct sk_buff *skb)
 #define skb_update_prio(skb)
 #endif
 
-static DEFINE_PER_CPU(int, xmit_recursion);
+DEFINE_PER_CPU(int, xmit_recursion);
+EXPORT_SYMBOL(xmit_recursion);
+
 #define RECURSION_LIMIT 10
 
 /**
  *     dev_loopback_xmit - loop back @skb
  *     @skb: buffer to transmit
  */
-int dev_loopback_xmit(struct sk_buff *skb)
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
 {
        skb_reset_mac_header(skb);
        __skb_pull(skb, skb_network_offset(skb));
@@ -2994,11 +3017,11 @@ out:
        return rc;
 }
 
-int dev_queue_xmit(struct sk_buff *skb)
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
 {
        return __dev_queue_xmit(skb, NULL);
 }
-EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_queue_xmit_sk);
 
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 {
@@ -3830,13 +3853,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
  *     NET_RX_SUCCESS: no congestion
  *     NET_RX_DROP: packet was dropped
  */
-int netif_receive_skb(struct sk_buff *skb)
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
 {
        trace_netif_receive_skb_entry(skb);
 
        return netif_receive_skb_internal(skb);
 }
-EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_receive_skb_sk);
 
 /* Network device is going away, flush any packets still pending
  * Called with irqs disabled.
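
The dev_queue_xmit()/netif_receive_skb() renames above do not force every caller to
change at once: the matching include/linux/netdevice.h change (not shown in this hunk)
presumably keeps the old names as thin static inline wrappers that pass skb->sk along,
roughly:

	/* approximate shape of the header-side compatibility wrappers */
	static inline int dev_queue_xmit(struct sk_buff *skb)
	{
		return dev_queue_xmit_sk(skb->sk, skb);
	}

	static inline int netif_receive_skb(struct sk_buff *skb)
	{
		return netif_receive_skb_sk(skb->sk, skb);
	}

Callers that do know the originating socket, such as the NF_HOOK() sites converted in
this series, can call the _sk variants directly.
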
@@ -6314,8 +6337,6 @@ int register_netdevice(struct net_device *dev)
        spin_lock_init(&dev->addr_list_lock);
        netdev_set_addr_lockdep_class(dev);
 
-       dev->iflink = -1;
-
        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;
@@ -6345,9 +6366,6 @@ int register_netdevice(struct net_device *dev)
        else if (__dev_get_by_index(net, dev->ifindex))
                goto err_uninit;
 
-       if (dev->iflink == -1)
-               dev->iflink = dev->ifindex;
-
        /* Transfer changeable features to wanted_features and enable
         * software offloads (GSO and GRO).
         */
@@ -7060,12 +7078,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_net_set(dev, net);
 
        /* If there is an ifindex conflict assign a new one */
-       if (__dev_get_by_index(net, dev->ifindex)) {
-               int iflink = (dev->iflink == dev->ifindex);
+       if (__dev_get_by_index(net, dev->ifindex))
                dev->ifindex = dev_new_index(net);
-               if (iflink)
-                       dev->iflink = dev->ifindex;
-       }
 
        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
index 68ea6950cad1f646906cf45ba78819447a8618eb..9a12668f7d62720c6ca18f09d13c45ea3e2ca2b2 100644 (file)
@@ -165,9 +165,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
 
        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
-       fib_rules_cleanup_ops(ops);
        spin_unlock(&net->rules_mod_lock);
 
+       fib_rules_cleanup_ops(ops);
        kfree_rcu(ops, rcu);
 }
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
index 444a07e4f68d7eca3e1c324ae91991d32779ae4d..b669e75d2b3624fb54935fc41b6d5df3d53aabe4 100644 (file)
@@ -1175,7 +1175,9 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
        return 0;
 }
 
-static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+#define BPF_RECOMPUTE_CSUM(flags)      ((flags) & 1)
+
+static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        unsigned int offset = (unsigned int) r2;
@@ -1192,7 +1194,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
         *
         * so check for invalid 'offset' and too large 'len'
         */
-       if (offset > 0xffff || len > sizeof(buf))
+       if (unlikely(offset > 0xffff || len > sizeof(buf)))
                return -EFAULT;
 
        if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len))
@@ -1202,7 +1204,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        if (unlikely(!ptr))
                return -EFAULT;
 
-       skb_postpull_rcsum(skb, ptr, len);
+       if (BPF_RECOMPUTE_CSUM(flags))
+               skb_postpull_rcsum(skb, ptr, len);
 
        memcpy(ptr, from, len);
 
@@ -1210,7 +1213,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
                /* skb_store_bits cannot return -EFAULT here */
                skb_store_bits(skb, offset, ptr, len);
 
-       if (skb->ip_summed == CHECKSUM_COMPLETE)
+       if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
        return 0;
 }
@@ -1223,6 +1226,99 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_PTR_TO_STACK,
        .arg4_type      = ARG_CONST_STACK_SIZE,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+#define BPF_HEADER_FIELD_SIZE(flags)   ((flags) & 0x0f)
+#define BPF_IS_PSEUDO_HEADER(flags)    ((flags) & 0x10)
+
+static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       __sum16 sum, *ptr;
+
+       if (unlikely(offset > 0xffff))
+               return -EFAULT;
+
+       if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+               return -EFAULT;
+
+       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+       if (unlikely(!ptr))
+               return -EFAULT;
+
+       switch (BPF_HEADER_FIELD_SIZE(flags)) {
+       case 2:
+               csum_replace2(ptr, from, to);
+               break;
+       case 4:
+               csum_replace4(ptr, from, to);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ptr == &sum)
+               /* skb_store_bits guaranteed to not return -EFAULT here */
+               skb_store_bits(skb, offset, ptr, sizeof(sum));
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_l3_csum_replace_proto = {
+       .func           = bpf_l3_csum_replace,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+       __sum16 sum, *ptr;
+
+       if (unlikely(offset > 0xffff))
+               return -EFAULT;
+
+       if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+               return -EFAULT;
+
+       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+       if (unlikely(!ptr))
+               return -EFAULT;
+
+       switch (BPF_HEADER_FIELD_SIZE(flags)) {
+       case 2:
+               inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
+               break;
+       case 4:
+               inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ptr == &sum)
+               /* skb_store_bits guaranteed to not return -EFAULT here */
+               skb_store_bits(skb, offset, ptr, sizeof(sum));
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_l4_csum_replace_proto = {
+       .func           = bpf_l4_csum_replace,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
 };
 
 static const struct bpf_func_proto *
@@ -1250,6 +1346,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
        switch (func_id) {
        case BPF_FUNC_skb_store_bytes:
                return &bpf_skb_store_bytes_proto;
+       case BPF_FUNC_l3_csum_replace:
+               return &bpf_l3_csum_replace_proto;
+       case BPF_FUNC_l4_csum_replace:
+               return &bpf_l4_csum_replace_proto;
        default:
                return sk_filter_func_proto(func_id);
        }
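
Combined with the new flags argument to bpf_skb_store_bytes(), the two checksum helpers
let a cls_act eBPF program rewrite packet fields without breaking checksums: bit 0 of
the store flags requests the receive-checksum fixup, the low nibble of the csum-replace
flags carries the field size (2 or 4), and 0x10 marks a change that is covered by the L4
pseudo header. A rough restricted-C sketch, with helper stubs declared in the style of
samples/bpf (offsets and names here are illustrative only):

	#include <uapi/linux/bpf.h>
	#include <uapi/linux/if_ether.h>
	#include <uapi/linux/ip.h>
	#include <uapi/linux/tcp.h>

	static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
		(void *) BPF_FUNC_skb_store_bytes;
	static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
		(void *) BPF_FUNC_l3_csum_replace;
	static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
		(void *) BPF_FUNC_l4_csum_replace;

	#define IP_SRC_OFF	(ETH_HLEN + offsetof(struct iphdr, saddr))
	#define IP_CSUM_OFF	(ETH_HLEN + offsetof(struct iphdr, check))
	#define TCP_CSUM_OFF	(ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))

	static inline void rewrite_saddr(struct __sk_buff *skb, __u32 old_ip, __u32 new_ip)
	{
		/* TCP checksum covers the pseudo header: 4-byte field, pseudo-header flag */
		bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, 0x10 | 4);
		/* IPv4 header checksum: plain 4-byte field replacement */
		bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, 4);
		/* write the new address; bit 0 asks for the rcsum adjustment */
		bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 1);
	}
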
@@ -1304,6 +1404,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
                                      offsetof(struct sk_buff, vlan_proto));
                break;
 
+       case offsetof(struct __sk_buff, priority):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, priority));
+               break;
+
        case offsetof(struct __sk_buff, mark):
                return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
 
index 49a9e3e06c085dbcb545e766c96186fba2dac45a..982861607f883e15f8c81921b72330985349bef9 100644 (file)
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(lweventlist_lock);
 static unsigned char default_operstate(const struct net_device *dev)
 {
        if (!netif_carrier_ok(dev))
-               return (dev->ifindex != dev->iflink ?
+               return (dev->ifindex != dev_get_iflink(dev) ?
                        IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);
 
        if (netif_dormant(dev))
@@ -89,7 +89,7 @@ static bool linkwatch_urgent_event(struct net_device *dev)
        if (!netif_running(dev))
                return false;
 
-       if (dev->ifindex != dev->iflink)
+       if (dev->ifindex != dev_get_iflink(dev))
                return true;
 
        if (dev->priv_flags & IFF_TEAM_PORT)
index cc5cf689809c6646bc567a8380a6bfe5346bb959..4238d6da5c60dc7ac7def10fb4e3ddda0a9377e6 100644 (file)
@@ -109,11 +109,19 @@ NETDEVICE_SHOW_RO(dev_id, fmt_hex);
 NETDEVICE_SHOW_RO(dev_port, fmt_dec);
 NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW_RO(addr_len, fmt_dec);
-NETDEVICE_SHOW_RO(iflink, fmt_dec);
 NETDEVICE_SHOW_RO(ifindex, fmt_dec);
 NETDEVICE_SHOW_RO(type, fmt_dec);
 NETDEVICE_SHOW_RO(link_mode, fmt_dec);
 
+static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
+{
+       struct net_device *ndev = to_net_dev(dev);
+
+       return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
+}
+static DEVICE_ATTR_RO(iflink);
+
 static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
 {
        return sprintf(buf, fmt_dec, dev->name_assign_type);
index e5e96b0f6717456c7f40322d4899eba0e5d54010..a3abb719221f690e2648a7a91126aef48f60219b 100644 (file)
@@ -148,9 +148,11 @@ static void ops_free_list(const struct pernet_operations *ops,
        }
 }
 
+static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
+                             int id);
 static int alloc_netid(struct net *net, struct net *peer, int reqid)
 {
-       int min = 0, max = 0;
+       int min = 0, max = 0, id;
 
        ASSERT_RTNL();
 
@@ -159,7 +161,11 @@ static int alloc_netid(struct net *net, struct net *peer, int reqid)
                max = reqid + 1;
        }
 
-       return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
+       id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
+       if (id >= 0)
+               rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);
+
+       return id;
 }
 
 /* This function is used by idr_for_each(). If net is equal to peer, the
@@ -198,8 +204,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
  */
 int peernet2id(struct net *net, struct net *peer)
 {
-       int id = __peernet2id(net, peer, true);
+       bool alloc = atomic_read(&peer->count) == 0 ? false : true;
+       int id;
 
+       id = __peernet2id(net, peer, alloc);
        return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -357,8 +365,10 @@ static void cleanup_net(struct work_struct *work)
                for_each_net(tmp) {
                        int id = __peernet2id(tmp, net, false);
 
-                       if (id >= 0)
+                       if (id >= 0) {
+                               rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
                                idr_remove(&tmp->netns_ids, id);
+                       }
                }
                idr_destroy(&net->netns_ids);
 
@@ -529,7 +539,8 @@ static int rtnl_net_get_size(void)
 }
 
 static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
-                        int cmd, struct net *net, struct net *peer)
+                        int cmd, struct net *net, struct net *peer,
+                        int nsid)
 {
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;
@@ -544,9 +555,13 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;
 
-       id = __peernet2id(net, peer, false);
-       if  (id < 0)
-               id = NETNSA_NSID_NOT_ASSIGNED;
+       if (nsid >= 0) {
+               id = nsid;
+       } else {
+               id = __peernet2id(net, peer, false);
+               if  (id < 0)
+                       id = NETNSA_NSID_NOT_ASSIGNED;
+       }
        if (nla_put_s32(skb, NETNSA_NSID, id))
                goto nla_put_failure;
 
@@ -563,8 +578,8 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct sk_buff *msg;
-       int err = -ENOBUFS;
        struct net *peer;
+       int err;
 
        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy);
@@ -587,7 +602,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
        }
 
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
-                           RTM_GETNSID, net, peer);
+                           RTM_GETNSID, net, peer, -1);
        if (err < 0)
                goto err_out;
 
@@ -601,6 +616,75 @@ out:
        return err;
 }
 
+struct rtnl_net_dump_cb {
+       struct net *net;
+       struct sk_buff *skb;
+       struct netlink_callback *cb;
+       int idx;
+       int s_idx;
+};
+
+static int rtnl_net_dumpid_one(int id, void *peer, void *data)
+{
+       struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
+       int ret;
+
+       if (net_cb->idx < net_cb->s_idx)
+               goto cont;
+
+       ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
+                           net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                           RTM_NEWNSID, net_cb->net, peer, id);
+       if (ret < 0)
+               return ret;
+
+cont:
+       net_cb->idx++;
+       return 0;
+}
+
+static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct rtnl_net_dump_cb net_cb = {
+               .net = net,
+               .skb = skb,
+               .cb = cb,
+               .idx = 0,
+               .s_idx = cb->args[0],
+       };
+
+       ASSERT_RTNL();
+
+       idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+
+       cb->args[0] = net_cb.idx;
+       return skb->len;
+}
+
+static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
+                             int id)
+{
+       struct sk_buff *msg;
+       int err = -ENOMEM;
+
+       msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+       if (!msg)
+               goto out;
+
+       err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
+       if (err < 0)
+               goto err_out;
+
+       rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
+       return;
+
+err_out:
+       nlmsg_free(msg);
+out:
+       rtnl_set_sk_err(net, RTNLGRP_NSID, err);
+}
+
 static int __init net_ns_init(void)
 {
        struct net_generic *ng;
@@ -635,7 +719,8 @@ static int __init net_ns_init(void)
        register_pernet_subsys(&net_ns_ops);
 
        rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
-       rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, NULL, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
+                     NULL);
 
        return 0;
 }
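
Because rtnl_net_dumpid() is now registered as the RTM_GETNSID dump handler, user space
can enumerate every peer-namespace id that was allocated (and announced via the new
RTM_NEWNSID notifications) with a single netlink dump. A bare-bones raw rtnetlink
sketch, with reply parsing and error handling left out (assumes uapi headers that
already export RTM_GETNSID):

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>

	static int dump_nsids(void)
	{
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
		struct {
			struct nlmsghdr nlh;
			struct rtgenmsg g;
		} req;
		char buf[8192];

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
		req.nlh.nlmsg_type = RTM_GETNSID;
		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		req.g.rtgen_family = AF_UNSPEC;

		send(fd, &req, req.nlh.nlmsg_len, 0);
		/* replies arrive as NLM_F_MULTI RTM_NEWNSID messages carrying a
		 * NETNSA_NSID attribute; a real caller walks them with NLMSG_OK() */
		recv(fd, buf, sizeof(buf), 0);
		close(fd);
		return 0;
	}
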
index b96ac2109c825d7f3668f30e2ca5c3648ea12074..7a836152359b2afe5b32a469ba5125bfd4910450 100644 (file)
@@ -1055,8 +1055,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 #ifdef CONFIG_RPS
            nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
 #endif
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
            (upper_dev &&
             nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
            nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
@@ -1932,7 +1932,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
 EXPORT_SYMBOL(rtnl_configure_link);
 
 struct net_device *rtnl_create_link(struct net *net,
-       char *ifname, unsigned char name_assign_type,
+       const char *ifname, unsigned char name_assign_type,
        const struct rtnl_link_ops *ops, struct nlattr *tb[])
 {
        int err;
@@ -1991,10 +1991,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
                struct ifinfomsg *ifm,
                struct nlattr **tb)
 {
-       struct net_device *dev;
+       struct net_device *dev, *aux;
        int err;
 
-       for_each_netdev(net, dev) {
+       for_each_netdev_safe(net, dev, aux) {
                if (dev->group == group) {
                        err = do_setlink(skb, dev, ifm, tb, NULL, 0);
                        if (err < 0)
@@ -2404,7 +2404,7 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
 
 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
                                   struct net_device *dev,
-                                  u8 *addr, u32 pid, u32 seq,
+                                  u8 *addr, u16 vid, u32 pid, u32 seq,
                                   int type, unsigned int flags,
                                   int nlflags)
 {
@@ -2426,6 +2426,9 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
 
        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
                goto nla_put_failure;
+       if (vid)
+               if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
+                       goto nla_put_failure;
 
        nlmsg_end(skb, nlh);
        return 0;
@@ -2440,7 +2443,7 @@ static inline size_t rtnl_fdb_nlmsg_size(void)
        return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
 }
 
-static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type)
 {
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
@@ -2450,7 +2453,8 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
        if (!skb)
                goto errout;
 
-       err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
+       err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
+                                     0, 0, type, NTF_SELF, 0);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
@@ -2585,7 +2589,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
                                               nlh->nlmsg_flags);
 
                if (!err) {
-                       rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
+                       rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH);
                        ndm->ndm_flags &= ~NTF_SELF;
                }
        }
@@ -2686,7 +2690,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
                        err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
 
                if (!err) {
-                       rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
+                       rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH);
                        ndm->ndm_flags &= ~NTF_SELF;
                }
        }
@@ -2711,7 +2715,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
                if (*idx < cb->args[0])
                        goto skip;
 
-               err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+               err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
                                              portid, seq,
                                              RTM_NEWNEIGH, NTF_SELF,
                                              NLM_F_MULTI);
@@ -2754,7 +2758,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net_device *dev;
        struct nlattr *tb[IFLA_MAX+1];
-       struct net_device *bdev = NULL;
        struct net_device *br_dev = NULL;
        const struct net_device_ops *ops = NULL;
        const struct net_device_ops *cops = NULL;
@@ -2778,7 +2781,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        return -ENODEV;
 
                ops = br_dev->netdev_ops;
-               bdev = br_dev;
        }
 
        for_each_netdev(net, dev) {
@@ -2791,7 +2793,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                cops = br_dev->netdev_ops;
                        }
 
-                       bdev = dev;
                } else {
                        if (dev != br_dev &&
                            !(dev->priv_flags & IFF_BRIDGE_PORT))
@@ -2801,7 +2802,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                            !(dev->priv_flags & IFF_EBRIDGE))
                                continue;
 
-                       bdev = br_dev;
                        cops = ops;
                }
 
@@ -2863,8 +2863,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
             nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
            (dev->addr_len &&
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
                goto nla_put_failure;
 
        br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
index cdb939b731aad72f039381d7b41183f1f6d94de5..3b6e5830256ec9e38399729cd352ee3735441198 100644 (file)
@@ -3752,7 +3752,6 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
 }
 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
 
-
 /**
  * skb_partial_csum_set - set up and verify partial csum values for packet
  * @skb: the skb to set
index 119ae464b44a44355e77b7ffb62e56e03acfd875..654e38a9975948f981f35716e7b9eac8569f0f93 100644 (file)
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
                sock_reset_flag(sk, bit);
 }
 
+bool sk_mc_loop(struct sock *sk)
+{
+       if (dev_recursion_level())
+               return false;
+       if (!sk)
+               return true;
+       switch (sk->sk_family) {
+       case AF_INET:
+               return inet_sk(sk)->mc_loop;
+#if IS_ENABLED(CONFIG_IPV6)
+       case AF_INET6:
+               return inet6_sk(sk)->mc_loop;
+#endif
+       }
+       WARN_ON(1);
+       return true;
+}
+EXPORT_SYMBOL(sk_mc_loop);
+
 /*
  *     This is meant for all protocols to use and covers goings on
  *     at the socket level. Everything here is generic.
index be1f08cdad29135238c59c52f7c6bbc6548d39d2..4507b188fc5109c6dced018c7b2159e9b1a8c3b2 100644 (file)
@@ -194,7 +194,7 @@ static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
        return err;
 }
 
-static int dn_neigh_output_packet(struct sk_buff *skb)
+static int dn_neigh_output_packet(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
@@ -206,7 +206,8 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 /*
  * For talking to broadcast devices: Ethernet & PPP
  */
-static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
+static int dn_long_output(struct neighbour *neigh, struct sock *sk,
+                         struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
        int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
@@ -245,14 +246,15 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
-                      neigh->dev, dn_neigh_output_packet);
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
+                      NULL, neigh->dev, dn_neigh_output_packet);
 }
 
 /*
  * For talking to pointopoint and multidrop devices: DDCMP and X.25
  */
-static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
+static int dn_short_output(struct neighbour *neigh, struct sock *sk,
+                          struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
        int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -284,8 +286,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
-                      neigh->dev, dn_neigh_output_packet);
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
+                      NULL, neigh->dev, dn_neigh_output_packet);
 }
 
 /*
@@ -293,7 +295,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
  * Phase 3 output is the same as short output, except that
  * it clears the area bits before transmission.
  */
-static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
+static int dn_phase3_output(struct neighbour *neigh, struct sock *sk,
+                           struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
        int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -324,11 +327,11 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
-                      neigh->dev, dn_neigh_output_packet);
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
+                      NULL, neigh->dev, dn_neigh_output_packet);
 }
 
-int dn_to_neigh_output(struct sk_buff *skb)
+int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *) dst;
@@ -347,11 +350,11 @@ int dn_to_neigh_output(struct sk_buff *skb)
        rcu_read_unlock();
 
        if (dn->flags & DN_NDFLAG_P3)
-               return dn_phase3_output(neigh, skb);
+               return dn_phase3_output(neigh, sk, skb);
        if (use_long)
-               return dn_long_output(neigh, skb);
+               return dn_long_output(neigh, sk, skb);
        else
-               return dn_short_output(neigh, skb);
+               return dn_short_output(neigh, sk, skb);
 }
 
 /*
@@ -372,7 +375,7 @@ void dn_neigh_pointopoint_hello(struct sk_buff *skb)
 /*
  * Ethernet router hello message received
  */
-int dn_neigh_router_hello(struct sk_buff *skb)
+int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb)
 {
        struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;
 
@@ -434,7 +437,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
 /*
  * Endnode hello message received
  */
-int dn_neigh_endnode_hello(struct sk_buff *skb)
+int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb)
 {
        struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
        struct neighbour *neigh;
index fe5f01485d3370ba4d645e4e640f073a1e26e41c..a321eac9fd0c5755f2d902b468137b413b092d46 100644 (file)
@@ -714,7 +714,7 @@ out:
        return ret;
 }
 
-static int dn_nsp_rx_packet(struct sk_buff *skb)
+static int dn_nsp_rx_packet(struct sock *sk2, struct sk_buff *skb)
 {
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct sock *sk = NULL;
@@ -814,7 +814,8 @@ free_out:
 
 int dn_nsp_rx(struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, NULL, skb,
+                      skb->dev, NULL,
                       dn_nsp_rx_packet);
 }
 
index 9ab0c4ba297f546ab4dd68b63894c14091b9ab3c..03227ffd19ce02c1a506ebd606813edb853a3e75 100644 (file)
@@ -512,7 +512,7 @@ static int dn_return_long(struct sk_buff *skb)
  *
  * Returns: result of input function if route is found, error code otherwise
  */
-static int dn_route_rx_packet(struct sk_buff *skb)
+static int dn_route_rx_packet(struct sock *sk, struct sk_buff *skb)
 {
        struct dn_skb_cb *cb;
        int err;
@@ -573,7 +573,8 @@ static int dn_route_rx_long(struct sk_buff *skb)
        ptr++;
        cb->hops = *ptr++; /* Visit Count */
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
+                      skb->dev, NULL,
                       dn_route_rx_packet);
 
 drop_it:
@@ -600,7 +601,8 @@ static int dn_route_rx_short(struct sk_buff *skb)
        ptr += 2;
        cb->hops = *ptr & 0x3f;
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
+                      skb->dev, NULL,
                       dn_route_rx_packet);
 
 drop_it:
@@ -608,7 +610,7 @@ drop_it:
        return NET_RX_DROP;
 }
 
-static int dn_route_discard(struct sk_buff *skb)
+static int dn_route_discard(struct sock *sk, struct sk_buff *skb)
 {
        /*
         * I know we drop the packet here, but that's considered success in
@@ -618,7 +620,7 @@ static int dn_route_discard(struct sk_buff *skb)
        return NET_RX_SUCCESS;
 }
 
-static int dn_route_ptp_hello(struct sk_buff *skb)
+static int dn_route_ptp_hello(struct sock *sk, struct sk_buff *skb)
 {
        dn_dev_hello(skb);
        dn_neigh_pointopoint_hello(skb);
@@ -704,22 +706,22 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
                switch (flags & DN_RT_CNTL_MSK) {
                case DN_RT_PKT_HELO:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-                                      skb, skb->dev, NULL,
+                                      NULL, skb, skb->dev, NULL,
                                       dn_route_ptp_hello);
 
                case DN_RT_PKT_L1RT:
                case DN_RT_PKT_L2RT:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
-                                      skb, skb->dev, NULL,
+                                      NULL, skb, skb->dev, NULL,
                                       dn_route_discard);
                case DN_RT_PKT_ERTH:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-                                      skb, skb->dev, NULL,
+                                      NULL, skb, skb->dev, NULL,
                                       dn_neigh_router_hello);
 
                case DN_RT_PKT_EEDH:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-                                      skb, skb->dev, NULL,
+                                      NULL, skb, skb->dev, NULL,
                                       dn_neigh_endnode_hello);
                }
        } else {
@@ -768,7 +770,8 @@ static int dn_output(struct sock *sk, struct sk_buff *skb)
        cb->rt_flags |= DN_RT_F_IE;
        cb->hops = 0;
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, sk, skb,
+                      NULL, dev,
                       dn_to_neigh_output);
 
 error:
@@ -816,7 +819,8 @@ static int dn_forward(struct sk_buff *skb)
        if (rt->rt_flags & RTCF_DOREDIRECT)
                cb->rt_flags |= DN_RT_F_IE;
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, NULL, skb,
+                      dev, skb->dev,
                       dn_to_neigh_output);
 
 drop:
index faf7cc3483fe0822c26be6b915061ee8fdd8be9a..9d66a0f72f906733878de68e7f2e6bd80932c1b9 100644 (file)
@@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void)
 
 void __exit dn_fib_rules_cleanup(void)
 {
+       rtnl_lock();
        fib_rules_unregister(dn_fib_rules_ops);
+       rtnl_unlock();
        rcu_barrier();
 }
 
index e4d9560a910b0eb96ed3a4ad59d63771f865de3c..af34fc9bdf69768e45e3e772929410fa0eeee41c 100644 (file)
@@ -89,9 +89,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
 
 static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
                        struct sk_buff *skb,
-                       const struct net_device *in,
-                       const struct net_device *out,
-                       int (*okfn)(struct sk_buff *))
+                       const struct nf_hook_state *state)
 {
        dnrmg_send_peer(skb);
        return NF_ACCEPT;
index 899772108ee3f92d3d32027f250692c546500417..5eaadabe23a1dbf6c8dcaa8032fd739d5d7d04c8 100644 (file)
@@ -513,12 +513,10 @@ static struct net_device *dev_to_net_device(struct device *dev)
 #ifdef CONFIG_OF
 static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
                                        struct dsa_chip_data *cd,
-                                       int chip_index,
+                                       int chip_index, int port_index,
                                        struct device_node *link)
 {
-       int ret;
        const __be32 *reg;
-       int link_port_addr;
        int link_sw_addr;
        struct device_node *parent_sw;
        int len;
@@ -531,6 +529,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
        if (!reg || (len != sizeof(*reg) * 2))
                return -EINVAL;
 
+       /*
+        * Get the destination switch number from the second field of its 'reg'
+        * property, i.e. for "reg = <0x19 1>" sw_addr is '1'.
+        */
        link_sw_addr = be32_to_cpup(reg + 1);
 
        if (link_sw_addr >= pd->nr_chips)
@@ -547,20 +549,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
                memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
        }
 
-       reg = of_get_property(link, "reg", NULL);
-       if (!reg) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       link_port_addr = be32_to_cpup(reg);
-
-       cd->rtable[link_sw_addr] = link_port_addr;
+       cd->rtable[link_sw_addr] = port_index;
 
        return 0;
-out:
-       kfree(cd->rtable);
-       return ret;
 }
 
 static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
@@ -670,7 +661,7 @@ static int dsa_of_probe(struct device *dev)
                        if (!strcmp(port_name, "dsa") && link &&
                                        pd->nr_chips > 1) {
                                ret = dsa_of_setup_routing_table(pd, cd,
-                                               chip_index, link);
+                                               chip_index, port_index, link);
                                if (ret)
                                        goto out_free_chip;
                        }
index 3597724ec3d82399fd40b515d27ebada8c6a4cfa..827cda560a552b7b0dca45d49a06816e6dda513b 100644 (file)
@@ -55,13 +55,11 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
 
 
 /* slave device handling ****************************************************/
-static int dsa_slave_init(struct net_device *dev)
+static int dsa_slave_get_iflink(const struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
 
-       dev->iflink = p->parent->dst->master_netdev->ifindex;
-
-       return 0;
+       return p->parent->dst->master_netdev->ifindex;
 }
 
 static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
@@ -664,7 +662,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 };
 
 static const struct net_device_ops dsa_slave_netdev_ops = {
-       .ndo_init               = dsa_slave_init,
        .ndo_open               = dsa_slave_open,
        .ndo_stop               = dsa_slave_close,
        .ndo_start_xmit         = dsa_slave_xmit,
@@ -675,6 +672,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_fdb_del            = dsa_slave_fdb_del,
        .ndo_fdb_dump           = dsa_slave_fdb_dump,
        .ndo_do_ioctl           = dsa_slave_ioctl,
+       .ndo_get_iflink         = dsa_slave_get_iflink,
 };
 
 static const struct swdev_ops dsa_slave_swdev_ops = {
index 64a9c0fdc4aa3d505ec8b0136c1da4f8be438bd2..8b47a4d79d040e39e592d3583affb7fec2d19f3d 100644 (file)
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
                 * shutdown() (rather than close()).
                 */
                if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-                   inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+                   !inet_csk(sk)->icsk_accept_queue.fastopenq) {
                        if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
                                err = fastopen_init_queue(sk, backlog);
                        else if ((sysctl_tcp_fastopen &
@@ -314,11 +314,11 @@ lookup_protocol:
        answer_flags = answer->flags;
        rcu_read_unlock();
 
-       WARN_ON(answer_prot->slab == NULL);
+       WARN_ON(!answer_prot->slab);
 
        err = -ENOBUFS;
        sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
-       if (sk == NULL)
+       if (!sk)
                goto out;
 
        err = 0;
@@ -1269,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                if (udpfrag) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
-                       if (skb->next != NULL)
+                       if (skb->next)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
                } else {
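
Most of the ipv4 hunks that follow are mechanical style cleanups rather than functional changes: explicit comparisons against NULL are dropped ('ptr == NULL' becomes '!ptr', 'ptr != NULL' becomes 'ptr') and assignments are hoisted out of if conditions, in line with checkpatch's preferred form. For example:

        /* before */
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
                goto out;

        /* after */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto out;
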
index 5f5c674e130ab438881745a9049ff028f900b6eb..933a92820d265e07b8c42300c7be6742565723b5 100644 (file)
@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh)
 
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
-       if (in_dev == NULL) {
+       if (!in_dev) {
                rcu_read_unlock();
                return -EINVAL;
        }
@@ -475,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
  */
 
 /*
- *     Create an arp packet. If (dest_hw == NULL), we create a broadcast
+ *     Create an arp packet. If dest_hw is not set, we create a broadcast
  *     message.
  */
 struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
@@ -495,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
         */
 
        skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                return NULL;
 
        skb_reserve(skb, hlen);
@@ -503,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
        arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
        skb->dev = dev;
        skb->protocol = htons(ETH_P_ARP);
-       if (src_hw == NULL)
+       if (!src_hw)
                src_hw = dev->dev_addr;
-       if (dest_hw == NULL)
+       if (!dest_hw)
                dest_hw = dev->broadcast;
 
        /*
@@ -569,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                break;
 #endif
        default:
-               if (target_hw != NULL)
+               if (target_hw)
                        memcpy(arp_ptr, target_hw, dev->addr_len);
                else
                        memset(arp_ptr, 0, dev->addr_len);
@@ -591,7 +591,8 @@ EXPORT_SYMBOL(arp_create);
 void arp_xmit(struct sk_buff *skb)
 {
        /* Send it off, maybe filter it using firewalling first.  */
-       NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit);
+       NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, NULL, skb,
+               NULL, skb->dev, dev_queue_xmit_sk);
 }
 EXPORT_SYMBOL(arp_xmit);
 
@@ -614,7 +615,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
 
        skb = arp_create(type, ptype, dest_ip, dev, src_ip,
                         dest_hw, src_hw, target_hw);
-       if (skb == NULL)
+       if (!skb)
                return;
 
        arp_xmit(skb);
@@ -625,7 +626,7 @@ EXPORT_SYMBOL(arp_send);
  *     Process an arp request.
  */
 
-static int arp_process(struct sk_buff *skb)
+static int arp_process(struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -644,7 +645,7 @@ static int arp_process(struct sk_buff *skb)
         * is ARP'able.
         */
 
-       if (in_dev == NULL)
+       if (!in_dev)
                goto out;
 
        arp = arp_hdr(skb);
@@ -808,7 +809,7 @@ static int arp_process(struct sk_buff *skb)
                is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
                          inet_addr_type(net, sip) == RTN_UNICAST;
 
-               if (n == NULL &&
+               if (!n &&
                    ((arp->ar_op == htons(ARPOP_REPLY)  &&
                      inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
                        n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
@@ -846,7 +847,7 @@ out:
 
 static void parp_redo(struct sk_buff *skb)
 {
-       arp_process(skb);
+       arp_process(NULL, skb);
 }
 
 
@@ -879,7 +880,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
 
        memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 
-       return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
+       return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, NULL, skb,
+                      dev, NULL, arp_process);
 
 consumeskb:
        consume_skb(skb);
@@ -900,7 +902,7 @@ out_of_mem:
 
 static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
 {
-       if (dev == NULL) {
+       if (!dev) {
                IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
                return 0;
        }
@@ -926,7 +928,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
                        return -ENODEV;
        }
        if (mask) {
-               if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL)
+               if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
                        return -ENOBUFS;
                return 0;
        }
@@ -947,7 +949,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
        ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
        if (r->arp_flags & ATF_PERM)
                r->arp_flags |= ATF_COM;
-       if (dev == NULL) {
+       if (!dev) {
                struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
 
                if (IS_ERR(rt))
@@ -1067,7 +1069,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
                return arp_req_delete_public(net, r, dev);
 
        ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
-       if (dev == NULL) {
+       if (!dev) {
                struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
@@ -1116,7 +1118,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        if (r.arp_dev[0]) {
                err = -ENODEV;
                dev = __dev_get_by_name(net, r.arp_dev);
-               if (dev == NULL)
+               if (!dev)
                        goto out;
 
                /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
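
Beyond the NULL-comparison cleanups, the arp.c hunks reflect a netfilter API change that runs through the rest of this diff: NF_HOOK()/NF_HOOK_COND() and the 'okfn' continuation they invoke on an ACCEPT verdict now carry a struct sock * alongside the skb. That is why arp_process() gained a socket argument and callers with no socket at hand (arp_rcv(), parp_redo()) pass NULL. A hedged sketch of a continuation written against the assumed new shape:

        /* Illustrative continuation only; sk may legitimately be NULL when
         * the packet is not tied to a local socket (e.g. proxied traffic).
         */
        static int example_okfn(struct sock *sk, struct sk_buff *skb)
        {
                return dev_queue_xmit(skb);
        }

        /* call shape (mirrors arp_xmit() above, which uses dev_queue_xmit_sk):
         *   NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, sk_or_NULL, skb,
         *           in_dev, out_dev, example_okfn);
         */
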
index e361ea6f3fc8ce0d2e0814109079037a26573d1d..bdb2a07ec363b709197435ac602b74377a600780 100644 (file)
@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void)
        cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
                                 sizeof(struct cipso_v4_map_cache_bkt),
                                 GFP_KERNEL);
-       if (cipso_v4_cache == NULL)
+       if (!cipso_v4_cache)
                return -ENOMEM;
 
        for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
                        secattr->cache = entry->lsm_data;
                        secattr->flags |= NETLBL_SECATTR_CACHE;
                        secattr->type = NETLBL_NLTYPE_CIPSOV4;
-                       if (prev_entry == NULL) {
+                       if (!prev_entry) {
                                spin_unlock_bh(&cipso_v4_cache[bkt].lock);
                                return 0;
                        }
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
        cipso_ptr_len = cipso_ptr[1];
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-       if (entry == NULL)
+       if (!entry)
                return -ENOMEM;
        entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
-       if (entry->key == NULL) {
+       if (!entry->key) {
                ret_val = -ENOMEM;
                goto cache_add_failure;
        }
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
        atomic_set(&doi_def->refcount, 1);
 
        spin_lock(&cipso_v4_doi_list_lock);
-       if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+       if (cipso_v4_doi_search(doi_def->doi)) {
                spin_unlock(&cipso_v4_doi_list_lock);
                ret_val = -EEXIST;
                goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
 
 doi_add_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                const char *type_str;
                switch (doi_type) {
                case CIPSO_V4_MAP_TRANS:
@@ -547,7 +547,7 @@ doi_add_return:
  */
 void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
 {
-       if (doi_def == NULL)
+       if (!doi_def)
                return;
 
        switch (doi_def->type) {
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
 
        spin_lock(&cipso_v4_doi_list_lock);
        doi_def = cipso_v4_doi_search(doi);
-       if (doi_def == NULL) {
+       if (!doi_def) {
                spin_unlock(&cipso_v4_doi_list_lock);
                ret_val = -ENOENT;
                goto doi_remove_return;
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
 
 doi_remove_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                audit_log_format(audit_buf,
                                 " cipso_doi=%u res=%u",
                                 doi, ret_val == 0 ? 1 : 0);
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
 
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(doi);
-       if (doi_def == NULL)
+       if (!doi_def)
                goto doi_getdef_return;
        if (!atomic_inc_not_zero(&doi_def->refcount))
                doi_def = NULL;
@@ -664,7 +664,7 @@ doi_getdef_return:
  */
 void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
 {
-       if (doi_def == NULL)
+       if (!doi_def)
                return;
 
        if (!atomic_dec_and_test(&doi_def->refcount))
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
 
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
-       if (doi_def == NULL) {
+       if (!doi_def) {
                err_offset = 2;
                goto validate_return_locked;
        }
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                         * not the loopback device drop the packet. Further,
                         * there is no legitimate reason for setting this from
                         * userspace so reject it if skb is NULL. */
-                       if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+                       if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
                                err_offset = opt_iter;
                                goto validate_return_locked;
                        }
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
         * defined yet but it is not a problem as the only users of these
         * "lite" PF_INET sockets are functions which do an accept() call
         * afterwards so we will label the socket as part of the accept(). */
-       if (sk == NULL)
+       if (!sk)
                return 0;
 
        /* We allocate the maximum CIPSO option size here so we are probably
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
         * on and after all we are only talking about 40 bytes. */
        buf_len = CIPSO_V4_OPT_LEN_MAX;
        buf = kmalloc(buf_len, GFP_ATOMIC);
-       if (buf == NULL) {
+       if (!buf) {
                ret_val = -ENOMEM;
                goto socket_setattr_failure;
        }
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
         * set the IPOPT_CIPSO option. */
        opt_len = (buf_len + 3) & ~3;
        opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-       if (opt == NULL) {
+       if (!opt) {
                ret_val = -ENOMEM;
                goto socket_setattr_failure;
        }
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
         * on and after all we are only talking about 40 bytes. */
        buf_len = CIPSO_V4_OPT_LEN_MAX;
        buf = kmalloc(buf_len, GFP_ATOMIC);
-       if (buf == NULL) {
+       if (!buf) {
                ret_val = -ENOMEM;
                goto req_setattr_failure;
        }
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
         * set the IPOPT_CIPSO option. */
        opt_len = (buf_len + 3) & ~3;
        opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-       if (opt == NULL) {
+       if (!opt) {
                ret_val = -ENOMEM;
                goto req_setattr_failure;
        }
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
 
        sk_inet = inet_sk(sk);
        opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-       if (opt == NULL || opt->opt.cipso == 0)
+       if (!opt || opt->opt.cipso == 0)
                return;
 
        hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req)
 
        req_inet = inet_rsk(req);
        opt = req_inet->opt;
-       if (opt == NULL || opt->opt.cipso == 0)
+       if (!opt || opt->opt.cipso == 0)
                return;
 
        cipso_v4_delopt(&req_inet->opt);
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso,
        doi = get_unaligned_be32(&cipso[2]);
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(doi);
-       if (doi_def == NULL)
+       if (!doi_def)
                goto getattr_return;
        /* XXX - This code assumes only one tag per CIPSO option which isn't
         * really a good assumption to make but since we only support the MAC
index c6473f365ad18ba3aff78ad8cc9806a526b1f179..419d23c53ec756327178f9101ea8287d671c9a47 100644 (file)
@@ -585,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        ifm = nlmsg_data(nlh);
        in_dev = inetdev_by_index(net, ifm->ifa_index);
-       if (in_dev == NULL) {
+       if (!in_dev) {
                err = -ENODEV;
                goto errout;
        }
@@ -755,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 
        ifm = nlmsg_data(nlh);
        err = -EINVAL;
-       if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
+       if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
                goto errout;
 
        dev = __dev_get_by_index(net, ifm->ifa_index);
        err = -ENODEV;
-       if (dev == NULL)
+       if (!dev)
                goto errout;
 
        in_dev = __in_dev_get_rtnl(dev);
        err = -ENOBUFS;
-       if (in_dev == NULL)
+       if (!in_dev)
                goto errout;
 
        ifa = inet_alloc_ifa();
-       if (ifa == NULL)
+       if (!ifa)
                /*
                 * A potential indev allocation can be left alive; it stays
                 * assigned to its device and is destroyed with it.
@@ -780,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
        neigh_parms_data_state_setall(in_dev->arp_parms);
        in_dev_hold(in_dev);
 
-       if (tb[IFA_ADDRESS] == NULL)
+       if (!tb[IFA_ADDRESS])
                tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 
        INIT_HLIST_NODE(&ifa->hash);
@@ -1290,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
        __be32 addr = 0;
        struct net_device *dev;
 
-       if (in_dev != NULL)
+       if (in_dev)
                return confirm_addr_indev(in_dev, dst, local, scope);
 
        rcu_read_lock();
@@ -1340,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
                if (named++ == 0)
                        goto skip;
                dot = strchr(old, ':');
-               if (dot == NULL) {
+               if (!dot) {
                        sprintf(old, ":%d", named);
                        dot = old;
                }
@@ -1509,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
        u32 preferred, valid;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        ifm = nlmsg_data(nlh);
@@ -1628,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 
        net = dev_net(ifa->ifa_dev->dev);
        skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
@@ -1665,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
                return -ENODATA;
 
        nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
-       if (nla == NULL)
+       if (!nla)
                return -EMSGSIZE;
 
        for (i = 0; i < IPV4_DEVCONF_MAX; i++)
@@ -1754,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
                        flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        ncm = nlmsg_data(nlh);
@@ -1796,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
        int err = -ENOBUFS;
 
        skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -1853,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
                break;
        default:
                dev = __dev_get_by_index(net, ifindex);
-               if (dev == NULL)
+               if (!dev)
                        goto errout;
                in_dev = __in_dev_get_rtnl(dev);
-               if (in_dev == NULL)
+               if (!in_dev)
                        goto errout;
                devconf = &in_dev->cnf;
                break;
@@ -1864,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
 
        err = -ENOBUFS;
        skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet_netconf_fill_devconf(skb, ifindex, devconf,
@@ -2215,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
 {
        struct devinet_sysctl_table *t = cnf->sysctl;
 
-       if (t == NULL)
+       if (!t)
                return;
 
        cnf->sysctl = NULL;
@@ -2276,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net)
 
        if (!net_eq(net, &init_net)) {
                all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
-               if (all == NULL)
+               if (!all)
                        goto err_alloc_all;
 
                dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
-               if (dflt == NULL)
+               if (!dflt)
                        goto err_alloc_dflt;
 
 #ifdef CONFIG_SYSCTL
                tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
-               if (tbl == NULL)
+               if (!tbl)
                        goto err_alloc_ctl;
 
                tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
@@ -2305,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net)
 
        err = -ENOMEM;
        forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
-       if (forw_hdr == NULL)
+       if (!forw_hdr)
                goto err_reg_ctl;
        net->ipv4.forw_hdr = forw_hdr;
 #endif
index 60173d4d3a0e335a91d2e9a463eeef6f6b88adfe..421a80b09b62358dad5a0fa35d99db73d28472a7 100644 (file)
@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x)
        int err;
 
        err = -EINVAL;
-       if (x->ealg == NULL)
+       if (!x->ealg)
                goto error;
 
        err = -ENAMETOOLONG;
index 718b0a16ea409ef06fd889d397ff17657b7e1790..872494e6e6eb7996185a99a8b05915f861a73ec4 100644 (file)
@@ -53,11 +53,11 @@ static int __net_init fib4_rules_init(struct net *net)
        struct fib_table *local_table, *main_table;
 
        main_table  = fib_trie_table(RT_TABLE_MAIN, NULL);
-       if (main_table == NULL)
+       if (!main_table)
                return -ENOMEM;
 
        local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
-       if (local_table == NULL)
+       if (!local_table)
                goto fail;
 
        hlist_add_head_rcu(&local_table->tb_hlist,
@@ -486,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                        for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
                                if (strcmp(ifa->ifa_label, devname) == 0)
                                        break;
-                       if (ifa == NULL)
+                       if (!ifa)
                                return -ENODEV;
                        cfg->fc_prefsrc = ifa->ifa_local;
                }
@@ -514,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                int len = 0;
 
                mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
-               if (mx == NULL)
+               if (!mx)
                        return -ENOMEM;
 
                if (rt->rt_flags & RTF_MTU)
@@ -676,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                goto errout;
 
        tb = fib_get_table(net, cfg.fc_table);
-       if (tb == NULL) {
+       if (!tb) {
                err = -ESRCH;
                goto errout;
        }
@@ -698,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                goto errout;
 
        tb = fib_new_table(net, cfg.fc_table);
-       if (tb == NULL) {
+       if (!tb) {
                err = -ENOBUFS;
                goto errout;
        }
@@ -779,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
        else
                tb = fib_new_table(net, RT_TABLE_LOCAL);
 
-       if (tb == NULL)
+       if (!tb)
                return;
 
        cfg.fc_table = tb->tb_id;
@@ -806,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
 
        if (ifa->ifa_flags & IFA_F_SECONDARY) {
                prim = inet_ifa_byprefix(in_dev, prefix, mask);
-               if (prim == NULL) {
+               if (!prim) {
                        pr_warn("%s: bug: prim == NULL\n", __func__);
                        return;
                }
@@ -860,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 
        if (ifa->ifa_flags & IFA_F_SECONDARY) {
                prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
-               if (prim == NULL) {
+               if (!prim) {
                        pr_warn("%s: bug: prim == NULL\n", __func__);
                        return;
                }
@@ -1030,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb)
                return;
 
        skb = netlink_skb_clone(skb, GFP_KERNEL);
-       if (skb == NULL)
+       if (!skb)
                return;
        nlh = nlmsg_hdr(skb);
 
@@ -1051,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
        };
 
        sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
-       if (sk == NULL)
+       if (!sk)
                return -EAFNOSUPPORT;
        net->ipv4.fibnl = sk;
        return 0;
@@ -1089,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
        case NETDEV_DOWN:
                fib_del_ifaddr(ifa, NULL);
                atomic_inc(&net->ipv4.dev_addr_genid);
-               if (ifa->ifa_dev->ifa_list == NULL) {
+               if (!ifa->ifa_dev->ifa_list) {
                        /* Last address was deleted from this interface.
                         * Disable IP.
                         */
@@ -1157,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net)
        size = max_t(size_t, size, L1_CACHE_BYTES);
 
        net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
-       if (net->ipv4.fib_table_hash == NULL)
+       if (!net->ipv4.fib_table_hash)
                return -ENOMEM;
 
        err = fib4_rules_init(net);
@@ -1175,13 +1175,11 @@ static void ip_fib_net_exit(struct net *net)
        unsigned int i;
 
        rtnl_lock();
-
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
        RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
        RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-
        for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
index 8162dd8e86d753bb5e932cd6094c233a5da9a962..56151982f74efb26dab4abad429f473ba8b06cba 100644 (file)
@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net)
        u32 id;
 
        for (id = 1; id <= RT_TABLE_MAX; id++)
-               if (fib_get_table(net, id) == NULL)
+               if (!fib_get_table(net, id))
                        return fib_new_table(net, id);
        return NULL;
 }
@@ -184,7 +184,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                        struct fib_table *table;
 
                        table = fib_empty_table(net);
-                       if (table == NULL) {
+                       if (!table) {
                                err = -ENOBUFS;
                                goto errout;
                        }
index eac5aec7772a08a7883ab5c669c14afb6f85e1d9..8d695b6659c715f89e06c31d9890532b34b2727f 100644 (file)
@@ -390,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
        int err = -ENOBUFS;
 
        skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = fib_dump_info(skb, info->portid, seq, event, tb_id,
@@ -503,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
        }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (cfg->fc_mp == NULL)
+       if (!cfg->fc_mp)
                return 0;
 
        rtnh = cfg->fc_mp;
@@ -646,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                rcu_read_lock();
                err = -ENODEV;
                in_dev = inetdev_by_index(net, nh->nh_oif);
-               if (in_dev == NULL)
+               if (!in_dev)
                        goto out;
                err = -ENETDOWN;
                if (!(in_dev->dev->flags & IFF_UP))
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        }
 
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
-       if (fi == NULL)
+       if (!fi)
                goto failure;
        fib_info_cnt++;
        if (cfg->fc_mx) {
@@ -921,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                nh->nh_scope = RT_SCOPE_NOWHERE;
                nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
                err = -ENODEV;
-               if (nh->nh_dev == NULL)
+               if (!nh->nh_dev)
                        goto failure;
        } else {
                change_nexthops(fi) {
@@ -995,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
        struct rtmsg *rtm;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -1045,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                struct nlattr *mp;
 
                mp = nla_nest_start(skb, RTA_MULTIPATH);
-               if (mp == NULL)
+               if (!mp)
                        goto nla_put_failure;
 
                for_nexthops(fi) {
                        rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
-                       if (rtnh == NULL)
+                       if (!rtnh)
                                goto nla_put_failure;
 
                        rtnh->rtnh_flags = nh->nh_flags & 0xFF;
@@ -1093,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
        struct hlist_head *head = &fib_info_laddrhash[hash];
        struct fib_info *fi;
 
-       if (fib_info_laddrhash == NULL || local == 0)
+       if (!fib_info_laddrhash || local == 0)
                return 0;
 
        hlist_for_each_entry(fi, head, fib_lhash) {
@@ -1182,7 +1182,7 @@ void fib_select_default(struct fib_result *res)
 
                fib_alias_accessed(fa);
 
-               if (fi == NULL) {
+               if (!fi) {
                        if (next_fi != res->fi)
                                break;
                } else if (!fib_detect_death(fi, order, &last_resort,
@@ -1195,7 +1195,7 @@ void fib_select_default(struct fib_result *res)
                order++;
        }
 
-       if (order <= 0 || fi == NULL) {
+       if (order <= 0 || !fi) {
                tb->tb_default = -1;
                goto out;
        }
@@ -1251,7 +1251,7 @@ int fib_sync_up(struct net_device *dev)
                                alive++;
                                continue;
                        }
-                       if (nexthop_nh->nh_dev == NULL ||
+                       if (!nexthop_nh->nh_dev ||
                            !(nexthop_nh->nh_dev->flags & IFF_UP))
                                continue;
                        if (nexthop_nh->nh_dev != dev ||
index 2c7c299ee2b923e8a7d5394f49868d305dd5df74..e13fcc602da20ee44dfd505ab1115bbcc0e13375 100644 (file)
@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i,
        BUG_ON(i >= child_length(tn));
 
        /* update emptyChildren, overflow into fullChildren */
-       if (n == NULL && chi != NULL)
+       if (!n && chi)
                empty_child_inc(tn);
-       if (n != NULL && chi == NULL)
+       if (n && !chi)
                empty_child_dec(tn);
 
        /* update fullChildren */
@@ -528,7 +528,7 @@ static struct key_vector *inflate(struct trie *t,
                unsigned long j, k;
 
                /* An empty child */
-               if (inode == NULL)
+               if (!inode)
                        continue;
 
                /* A leaf or an internal node with skipped bits */
@@ -1154,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        }
                        err = -ENOBUFS;
                        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-                       if (new_fa == NULL)
+                       if (!new_fa)
                                goto out;
 
                        fi_drop = fa->fa_info;
@@ -1204,7 +1204,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 
        err = -ENOBUFS;
        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-       if (new_fa == NULL)
+       if (!new_fa)
                goto out;
 
        new_fa->fa_info = fi;
@@ -1975,7 +1975,7 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
                sz += sizeof(struct trie);
 
        tb = kzalloc(sz, GFP_KERNEL);
-       if (tb == NULL)
+       if (!tb)
                return NULL;
 
        tb->tb_id = id;
index 5a4828ba05ad7997998e400d10bc8b1dfbe99db0..b77f5e84c623f055fe277ea2178a29589fadaf1b 100644 (file)
@@ -136,7 +136,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
-       return udp_tunnel_xmit_skb(rt, skb, src, dst,
+       return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
                                   tos, ttl, df, src_port, dst_port, xnet,
                                   !csum);
 }
@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
-       if (ptype == NULL) {
+       if (!ptype) {
                flush = 1;
                goto out_unlock;
        }
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
        rcu_read_unlock();
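
The geneve change feeds the tunnel's own socket (gs->sock->sk) into udp_tunnel_xmit_skb(), which in this series grows a struct sock * parameter so the socket can accompany the skb through the output hooks shown later. Assumed prototype after the change, with the argument order taken from the call site above:

        int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk,
                                struct sk_buff *skb,
                                __be32 src, __be32 dst, __u8 tos, __u8 ttl,
                                __be16 df, __be16 src_port, __be16 dst_port,
                                bool xnet, bool nocheck);
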
index 51973ddc05a68463f8f39e779491374f12e5191a..5aa46d4b44efb99702ccd89005528f20ae422a0e 100644 (file)
@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 
        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
-       if (ptype == NULL)
+       if (!ptype)
                goto out_unlock;
 
        grehlen = GRE_HEADER_SECTION;
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
 
        rcu_read_unlock();
index 5e564014a0b75d04a8f64d48c6d3a14fe6df18a1..f5203fba623638d94b03435db86ac4ed696adba8 100644 (file)
@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
                return;
 
        sk = icmp_xmit_lock(net);
-       if (sk == NULL)
+       if (!sk)
                return;
        inet = inet_sk(sk);
 
@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                                                 skb_in->data,
                                                 sizeof(_inner_type),
                                                 &_inner_type);
-                       if (itp == NULL)
+                       if (!itp)
                                goto out;
 
                        /*
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                return;
 
        sk = icmp_xmit_lock(net);
-       if (sk == NULL)
+       if (!sk)
                goto out_free;
 
        /*
index ad09213ac5b2fbe3af068334953807f8e4157df1..a3a697f5ffbaba1b30db8341ea9b51b229ac29df 100644 (file)
@@ -692,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
-       if (skb == NULL) {
+       if (!skb) {
                ip_rt_put(rt);
                return -1;
        }
@@ -981,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb)
        int len = skb->len;
        bool dropped = true;
 
-       if (in_dev == NULL)
+       if (!in_dev)
                goto drop;
 
        if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
@@ -1888,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
        if (count >= sysctl_igmp_max_memberships)
                goto done;
        iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
-       if (iml == NULL)
+       if (!iml)
                goto done;
 
        memcpy(&iml->multi, imr, sizeof(*imr));
@@ -1909,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
        struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
        int err;
 
-       if (psf == NULL) {
+       if (!psf) {
                /* any-source empty exclude case */
                return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
                        iml->sfmode, 0, NULL, 0);
@@ -2360,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk)
        struct ip_mc_socklist *iml;
        struct net *net = sock_net(sk);
 
-       if (inet->mc_list == NULL)
+       if (!inet->mc_list)
                return;
 
        rtnl_lock();
@@ -2370,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
                inet->mc_list = iml->next_rcu;
                in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
                (void) ip_mc_leave_src(sk, iml, in_dev);
-               if (in_dev != NULL)
+               if (in_dev)
                        ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2587,13 +2587,13 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
        for_each_netdev_rcu(net, state->dev) {
                struct in_device *idev;
                idev = __in_dev_get_rcu(state->dev);
-               if (unlikely(idev == NULL))
+               if (unlikely(!idev))
                        continue;
                im = rcu_dereference(idev->mc_list);
-               if (likely(im != NULL)) {
+               if (likely(im)) {
                        spin_lock_bh(&im->lock);
                        psf = im->sources;
-                       if (likely(psf != NULL)) {
+                       if (likely(psf)) {
                                state->im = im;
                                state->idev = idev;
                                break;
@@ -2663,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
 {
        struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
-       if (likely(state->im != NULL)) {
+       if (likely(state->im)) {
                spin_unlock_bh(&state->im->lock);
                state->im = NULL;
        }
index 79c0c9439fdc7dd0b68421a6b229c869f37f7a01..5c3dd6267ed3557f2f139f83002fd7b1feaab237 100644 (file)
@@ -673,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 {
        struct sock *newsk = sk_clone_lock(sk, priority);
 
-       if (newsk != NULL) {
+       if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
                newsk->sk_state = TCP_SYN_RECV;
@@ -843,7 +843,7 @@ void inet_csk_listen_stop(struct sock *sk)
                sk_acceptq_removed(sk);
                reqsk_put(req);
        }
-       if (queue->fastopenq != NULL) {
+       if (queue->fastopenq) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq->lock);
                acc_req = queue->fastopenq->rskq_rst_head;
@@ -875,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_getsockopt)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -888,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_setsockopt)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
index e7920352646aed0a0680557babecfb586283ce92..5e346a082e5ff05b58cfebb64917ee26001d809d 100644 (file)
@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
        }
 
        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
-       if (q == NULL)
+       if (!q)
                return NULL;
 
        q->net = nf;
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
        struct inet_frag_queue *q;
 
        q = inet_frag_alloc(nf, f, arg);
-       if (q == NULL)
+       if (!q)
                return NULL;
 
        return inet_frag_intern(nf, q, f, arg);
index 0fb841b9d83409c133d20d2144cc2edcef5a31c5..d4630bf2d9aad1fd9070a11323b1cd0f7c0b9949 100644 (file)
@@ -64,7 +64,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 {
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
-       if (tb != NULL) {
+       if (tb) {
                write_pnet(&tb->ib_net, net);
                tb->port      = snum;
                tb->fastreuse = 0;
index f38e387448fb5596c64cdcb07e7cb06d3e624451..118f0f195820fa98554bafa5e1ddbd0da7c002c7 100644 (file)
@@ -173,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
-       if (tw != NULL) {
+       if (tw) {
                const struct inet_sock *inet = inet_sk(sk);
 
                kmemcheck_annotate_bitfield(tw, flags);
index d9bc28ac5d1b97340e79aae1eefcbac3f463251a..939992c456f3bb0d1505c6d4e9af8cc5b4da1ecc 100644 (file)
@@ -57,7 +57,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 }
 
 
-static int ip_forward_finish(struct sk_buff *skb)
+static int ip_forward_finish(struct sock *sk, struct sk_buff *skb)
 {
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
@@ -68,7 +68,7 @@ static int ip_forward_finish(struct sk_buff *skb)
                ip_forward_options(skb);
 
        skb_sender_cpu_clear(skb);
-       return dst_output(skb);
+       return dst_output_sk(sk, skb);
 }
 
 int ip_forward(struct sk_buff *skb)
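
ip_forward_finish() is converted to the two-argument continuation form and now calls dst_output_sk() with the socket it was handed. The presumed relationship between the old and new helpers (a sketch, not necessarily the literal dst.h code in this tree):

        static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
        {
                return skb_dst(skb)->output(sk, skb);
        }

        static inline int dst_output(struct sk_buff *skb)
        {
                return dst_output_sk(skb->sk, skb);
        }
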
@@ -136,8 +136,8 @@ int ip_forward(struct sk_buff *skb)
 
        skb->priority = rt_tos2priority(iph->tos);
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
-                      rt->dst.dev, ip_forward_finish);
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
+                      skb->dev, rt->dst.dev, ip_forward_finish);
 
 sr_failed:
        /*
index 145a50c4d56630a5fc97283d85c3fa29e10ab476..cc1da6d9cb351de56c7f357faebe32cdbb6f7c27 100644 (file)
@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                goto err;
 
        err = -ENOMEM;
-       if (pskb_pull(skb, ihl) == NULL)
+       if (!pskb_pull(skb, ihl))
                goto err;
 
        err = pskb_trim_rcsum(skb, end - offset);
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                qp->q.fragments = head;
        }
 
-       WARN_ON(head == NULL);
+       WARN_ON(!head);
        WARN_ON(FRAG_CB(head)->offset != 0);
 
        /* Allocate a new buffer for the datagram. */
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                struct sk_buff *clone;
                int i, plen = 0;
 
-               if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
+               clone = alloc_skb(0, GFP_ATOMIC);
+               if (!clone)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
@@ -638,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Lookup (or create) queue header */
-       if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+       qp = ip_find(net, ip_hdr(skb), user);
+       if (qp) {
                int ret;
 
                spin_lock(&qp->q.lock);
@@ -754,7 +756,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
        table = ip4_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
-               if (table == NULL)
+               if (!table)
                        goto err_alloc;
 
                table[0].data = &net->ipv4.frags.high_thresh;
@@ -770,7 +772,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
        }
 
        hdr = register_net_sysctl(net, "net/ipv4", table);
-       if (hdr == NULL)
+       if (!hdr)
                goto err_reg;
 
        net->ipv4.frags_hdr = hdr;
index 0eb2a040a83012fb92dd9f4f0639e5a5ad2dc797..5fd706473c733402c9aad9c6c30466549de8c54d 100644 (file)
@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                             iph->daddr, iph->saddr, tpi->key);
 
-       if (t == NULL)
+       if (!t)
                return PACKET_REJECT;
 
        if (t->parms.iph.daddr == 0 ||
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev)
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
-               if (__in_dev_get_rtnl(dev) == NULL)
+               if (!__in_dev_get_rtnl(dev))
                        return -EADDRNOTAVAIL;
                t->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
@@ -456,6 +456,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
        .ndo_do_ioctl           = ipgre_tunnel_ioctl,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_get_iflink         = ip_tunnel_get_iflink,
 };
 
 #define GRE_FEATURES (NETIF_F_SG |             \
@@ -686,6 +687,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_get_iflink         = ip_tunnel_get_iflink,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
index 3d4da2c16b6a3c6d6bc41d8fec0ed182037b0801..2db4c8773c1b405da48758db66969060df2f0812 100644 (file)
@@ -187,7 +187,7 @@ bool ip_call_ra_chain(struct sk_buff *skb)
        return false;
 }
 
-static int ip_local_deliver_finish(struct sk_buff *skb)
+static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb)
 {
        struct net *net = dev_net(skb->dev);
 
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                raw = raw_local_deliver(skb, protocol);
 
                ipprot = rcu_dereference(inet_protos[protocol]);
-               if (ipprot != NULL) {
+               if (ipprot) {
                        int ret;
 
                        if (!ipprot->no_policy) {
@@ -253,7 +253,8 @@ int ip_local_deliver(struct sk_buff *skb)
                        return 0;
        }
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb,
+                      skb->dev, NULL,
                       ip_local_deliver_finish);
 }
 
@@ -309,12 +310,12 @@ drop:
 int sysctl_ip_early_demux __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_ip_early_demux);
 
-static int ip_rcv_finish(struct sk_buff *skb)
+static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
 
-       if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+       if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
                const struct net_protocol *ipprot;
                int protocol = iph->protocol;
 
@@ -387,7 +388,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
        IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
 
-       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb) {
                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
                goto out;
        }
@@ -450,7 +452,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
+                      dev, NULL,
                       ip_rcv_finish);
 
 csum_error:
index 5b3d91be2db0c8f1a78606727c703475dd61b598..bd246792360b4b8dcda2c13328ea5f01bb603e06 100644 (file)
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
        unsigned char *iph;
        int optlen, l;
 
-       if (skb != NULL) {
+       if (skb) {
                rt = skb_rtable(skb);
                optptr = (unsigned char *)&(ip_hdr(skb)[1]);
        } else
index 8259e777b2492b3c75363712255f126bf8d6d15c..c65b93a7b7113660d9f946128c0a4acee810de0f 100644 (file)
@@ -91,14 +91,19 @@ void ip_send_check(struct iphdr *iph)
 }
 EXPORT_SYMBOL(ip_send_check);
 
-int __ip_local_out(struct sk_buff *skb)
+int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
        struct iphdr *iph = ip_hdr(skb);
 
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
-       return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
-                      skb_dst(skb)->dev, dst_output);
+       return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL,
+                      skb_dst(skb)->dev, dst_output_sk);
+}
+
+int __ip_local_out(struct sk_buff *skb)
+{
+       return __ip_local_out_sk(skb->sk, skb);
 }
 
 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
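
The __ip_local_out() hunk above shows the conversion pattern used throughout this part of the series: introduce a *_sk() variant that takes the socket explicitly, and keep the old entry point as a one-line wrapper that derives it from the skb, so existing callers keep working while socket-aware paths can pass sk straight through. In the abstract (foo is a placeholder name, not a kernel symbol):

        /* Pattern sketch: foo_sk() does the work, foo() stays for legacy callers. */
        int foo_sk(struct sock *sk, struct sk_buff *skb);

        int foo(struct sk_buff *skb)
        {
                return foo_sk(skb->sk, skb);
        }
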
@@ -163,7 +168,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 
-static inline int ip_finish_output2(struct sk_buff *skb)
+static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
@@ -182,7 +187,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
                struct sk_buff *skb2;
 
                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
-               if (skb2 == NULL) {
+               if (!skb2) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
@@ -211,7 +216,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
-static int ip_finish_output_gso(struct sk_buff *skb)
+static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
 {
        netdev_features_t features;
        struct sk_buff *segs;
@@ -220,7 +225,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
        /* common case: locally created skb or seglen is <= mtu */
        if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
              skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
-               return ip_finish_output2(skb);
+               return ip_finish_output2(sk, skb);
 
        /* Slowpath -  GSO segment length is exceeding the dst MTU.
         *
@@ -243,7 +248,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
                int err;
 
                segs->next = NULL;
-               err = ip_fragment(segs, ip_finish_output2);
+               err = ip_fragment(sk, segs, ip_finish_output2);
 
                if (err && ret == 0)
                        ret = err;
@@ -253,22 +258,22 @@ static int ip_finish_output_gso(struct sk_buff *skb)
        return ret;
 }
 
-static int ip_finish_output(struct sk_buff *skb)
+static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
-       if (skb_dst(skb)->xfrm != NULL) {
+       if (skb_dst(skb)->xfrm) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
-               return dst_output(skb);
+               return dst_output_sk(sk, skb);
        }
 #endif
        if (skb_is_gso(skb))
-               return ip_finish_output_gso(skb);
+               return ip_finish_output_gso(sk, skb);
 
        if (skb->len > ip_skb_dst_mtu(skb))
-               return ip_fragment(skb, ip_finish_output2);
+               return ip_fragment(sk, skb, ip_finish_output2);
 
-       return ip_finish_output2(skb);
+       return ip_finish_output2(sk, skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
@@ -307,7 +312,7 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
-                                       newskb, NULL, newskb->dev,
+                                       sk, newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);
                }
 
@@ -322,11 +327,11 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
-                       NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
+                       NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb,
                                NULL, newskb->dev, dev_loopback_xmit);
        }
 
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL,
                            skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
@@ -340,7 +345,8 @@ int ip_output(struct sock *sk, struct sk_buff *skb)
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
 
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
+                           NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
@@ -376,12 +382,12 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
        inet_opt = rcu_dereference(inet->inet_opt);
        fl4 = &fl->u.ip4;
        rt = skb_rtable(skb);
-       if (rt != NULL)
+       if (rt)
                goto packet_routed;
 
        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
-       if (rt == NULL) {
+       if (!rt) {
                __be32 daddr;
 
                /* Use correct destination address if we have options. */
@@ -449,7 +455,6 @@ no_route:
 }
 EXPORT_SYMBOL(ip_queue_xmit);
 
-
 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 {
        to->pkt_type = from->pkt_type;
@@ -480,7 +485,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
  *     single device frame, and queue such a frame for sending.
  */
 
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+int ip_fragment(struct sock *sk, struct sk_buff *skb,
+               int (*output)(struct sock *, struct sk_buff *))
 {
        struct iphdr *iph;
        int ptr;
@@ -587,13 +593,13 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
-                               if (frag->next != NULL)
+                               if (frag->next)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }
 
-                       err = output(skb);
+                       err = output(sk, skb);
 
                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
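
ip_fragment() now takes the socket and an output callback of the matching (struct sock *, struct sk_buff *) form, so every generated fragment is emitted with the sk the caller was given. A caller after the change looks like the abridged ip_finish_output() taken from the hunks above (the xfrm and GSO branches are elided):

        static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
        {
                if (skb->len > ip_skb_dst_mtu(skb))
                        return ip_fragment(sk, skb, ip_finish_output2);

                return ip_finish_output2(sk, skb);
        }
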
@@ -730,7 +736,7 @@ slow_path:
 
                ip_send_check(iph);
 
-               err = output(skb2);
+               err = output(sk, skb2);
                if (err)
                        goto fail;
 
@@ -790,12 +796,13 @@ static inline int ip_ufo_append_data(struct sock *sk,
         * device, so create one single skb packet containing complete
         * udp datagram
         */
-       if ((skb = skb_peek_tail(queue)) == NULL) {
+       skb = skb_peek_tail(queue);
+       if (!skb) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
 
-               if (skb == NULL)
+               if (!skb)
                        return err;
 
                /* reserve space for Hardware header */
@@ -812,7 +819,6 @@ static inline int ip_ufo_append_data(struct sock *sk,
 
                skb->csum = 0;
 
-
                __skb_queue_tail(queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
@@ -961,10 +967,10 @@ alloc_new_skb:
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
-                               if (unlikely(skb == NULL))
+                               if (unlikely(!skb))
                                        err = -ENOBUFS;
                        }
-                       if (skb == NULL)
+                       if (!skb)
                                goto error;
 
                        /*
@@ -1088,10 +1094,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
         */
        opt = ipc->opt;
        if (opt) {
-               if (cork->opt == NULL) {
+               if (!cork->opt) {
                        cork->opt = kmalloc(sizeof(struct ip_options) + 40,
                                            sk->sk_allocation);
-                       if (unlikely(cork->opt == NULL))
+                       if (unlikely(!cork->opt))
                                return -ENOBUFS;
                }
                memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
@@ -1198,7 +1204,8 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                return -EMSGSIZE;
        }
 
-       if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+       skb = skb_peek_tail(&sk->sk_write_queue);
+       if (!skb)
                return -EINVAL;
 
        cork->length += size;
@@ -1209,7 +1216,6 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }
 
-
        while (size > 0) {
                int i;
 
@@ -1329,7 +1335,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        __be16 df = 0;
        __u8 ttl;
 
-       if ((skb = __skb_dequeue(queue)) == NULL)
+       skb = __skb_dequeue(queue);
+       if (!skb)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
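The ip_output.c hunks above thread the originating socket through the IPv4 output path: NF_HOOK_COND() now carries the struct sock *, and ip_fragment()'s output callback takes (struct sock *, struct sk_buff *). Purely as an illustration, not part of the patch, a caller written against the new signatures might look like the sketch below; example_output and example_xmit are invented names, and the snippet assumes the usual in-tree headers (<net/ip.h>, <net/dst.h>):

/*
 * Hypothetical caller sketch: ip_fragment() now takes the socket and an
 * output callback of type int (*)(struct sock *, struct sk_buff *).
 * dst_output_sk() and ip_fragment() are the in-tree helpers changed above;
 * the example_* functions are invented for illustration.
 */
static int example_output(struct sock *sk, struct sk_buff *skb)
{
	/* hand the (possibly fragmented) skb onwards */
	return dst_output_sk(sk, skb);
}

static int example_xmit(struct sock *sk, struct sk_buff *skb)
{
	if (skb->len > dst_mtu(skb_dst(skb)) && !skb_is_gso(skb))
		return ip_fragment(sk, skb, example_output);

	return example_output(sk, skb);
}
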
index f6a0d54b308ac8a758724fab9bb92afddcfcb333..7cfb0893f2636bcc87537da3014643362f72b10f 100644 (file)
@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
                        return 0;
                }
        }
-       if (new_ra == NULL) {
+       if (!new_ra) {
                spin_unlock_bh(&ip_ra_lock);
                return -ENOBUFS;
        }
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                                   skb_network_header(skb);
        serr->port = port;
 
-       if (skb_pull(skb, payload - skb->data) != NULL) {
+       if (skb_pull(skb, payload - skb->data)) {
                skb_reset_transport_header(skb);
                if (sock_queue_err_skb(sk, skb) == 0)
                        return;
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        err = -EAGAIN;
        skb = sock_dequeue_err_skb(sk);
-       if (skb == NULL)
+       if (!skb)
                goto out;
 
        copied = skb->len;
index 2cd08280c77bc33cac90c62e0f6f8f36343a768d..4c2c3ba4ba6595c788940e3ec2c2dadfe7abe2f0 100644 (file)
@@ -389,7 +389,6 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
-       dev->iflink = tunnel->parms.link;
 
        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);
@@ -655,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        if (dst == 0) {
                /* NBMA tunnel */
 
-               if (skb_dst(skb) == NULL) {
+               if (!skb_dst(skb)) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }
@@ -673,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
-                       if (neigh == NULL)
+                       if (!neigh)
                                goto tx_error;
 
                        addr6 = (const struct in6_addr *)&neigh->primary_key;
@@ -783,7 +782,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                return;
        }
 
-       err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
+       err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol,
                            tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 
@@ -844,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
        case SIOCGETTUNNEL:
                if (dev == itn->fb_tunnel_dev) {
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
-                       if (t == NULL)
+                       if (!t)
                                t = netdev_priv(dev);
                }
                memcpy(p, &t->parms, sizeof(*p));
@@ -877,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
@@ -915,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
-                       if (t == NULL)
+                       if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
@@ -980,6 +979,14 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev)
 }
 EXPORT_SYMBOL(ip_tunnel_get_link_net);
 
+int ip_tunnel_get_iflink(const struct net_device *dev)
+{
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       return tunnel->parms.link;
+}
+EXPORT_SYMBOL(ip_tunnel_get_iflink);
+
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                                  struct rtnl_link_ops *ops, char *devname)
 {
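The dev->iflink assignment removed above is replaced by the new ip_tunnel_get_iflink() helper, wired up as .ndo_get_iflink in the vti and ipip hunks further down, while readers switch to dev_get_iflink() as in the ipmr hunk below. A rough sketch of that pattern, with invented example_* names:

/* Driver-side wiring (mirrors the vti/ipip changes below). */
static const struct net_device_ops example_tunnel_netdev_ops = {
	/* ... the usual tunnel callbacks elided ... */
	.ndo_change_mtu	 = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink	 = ip_tunnel_get_iflink,
};

/* Reader side: the underlay ifindex is no longer read from dev->iflink. */
static int example_underlay_ifindex(const struct net_device *dev)
{
	return dev_get_iflink(dev);
}
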
index 8c4dcc46acd2932e062faf546331c938b160b0d1..ce63ab21b6cda87f8caea29967a4651b7f78c909 100644 (file)
@@ -74,7 +74,8 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
        iph->daddr      =       dst;
        iph->saddr      =       src;
        iph->ttl        =       ttl;
-       __ip_select_ident(sock_net(sk), iph, skb_shinfo(skb)->gso_segs ?: 1);
+       __ip_select_ident(dev_net(rt->dst.dev), iph,
+                         skb_shinfo(skb)->gso_segs ?: 1);
 
        err = ip_local_out_sk(sk, skb);
        if (unlikely(net_xmit_eval(err)))
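Since ip_tunnel_xmit() above now passes a NULL socket, the iptunnel_xmit() hunk here derives the namespace for IP ID selection from the route's device (dev_net(rt->dst.dev)) instead of sock_net(sk). Purely as an illustration of the socket-less call, with placeholder values and an invented function name:

/* Hypothetical socket-less encapsulated transmit; rt, skb, fl4, tos, ttl
 * and df are placeholders standing in for the caller's routing state, and
 * IPPROTO_IPIP is just an example protocol.
 */
static int example_encap_xmit(struct rtable *rt, struct sk_buff *skb,
			      struct flowi4 *fl4, u8 tos, u8 ttl, __be16 df)
{
	/* NULL socket is now valid: the IP ID comes from the route's
	 * namespace rather than from sock_net(sk).
	 */
	return iptunnel_xmit(NULL, rt, skb, fl4->saddr, fl4->daddr,
			     IPPROTO_IPIP, tos, ttl, df, false);
}
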
index 5a6e27054f0a87e051cd995ab50952273d09c2b5..9f7269f3c54af2ecbc74db4ec2c0f71d5184dc1c 100644 (file)
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
 
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
-       if (tunnel != NULL) {
+       if (tunnel) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto drop;
 
@@ -341,6 +341,7 @@ static const struct net_device_ops vti_netdev_ops = {
        .ndo_do_ioctl   = vti_tunnel_ioctl,
        .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip_tunnel_get_iflink,
 };
 
 static void vti_tunnel_setup(struct net_device *dev)
@@ -361,7 +362,6 @@ static int vti_tunnel_init(struct net_device *dev)
        dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr);
        dev->mtu                = ETH_DATA_LEN;
        dev->flags              = IFF_NOARP;
-       dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
        netif_keep_dst(dev);
index c0855d50a3fa775831ada20254b74d94f5d410ae..d97f4f2787f5f85c5f073df5e5276daf03c4ba6a 100644 (file)
@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
        struct xfrm_state *t;
 
        t = xfrm_state_alloc(net);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        t->id.proto = IPPROTO_IPIP;
index b26376ef87f616d249dbfebbd6705eb818fefd05..8e7328c6a390a9bc064a67e4ca4263c891239378 100644 (file)
@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        if (!net_eq(dev_net(dev), &init_net))
                goto drop;
 
-       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
                return NET_RX_DROP;
 
        if (!pskb_may_pull(skb, sizeof(struct arphdr)))
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto drop;
 
-       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
                return NET_RX_DROP;
 
        if (!pskb_may_pull(skb,
index bfbcc85c02ee7b0897826e5909ff551f62cc50b0..ff96396ebec5bdf794cf84776f21505d0b7de737 100644 (file)
@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
        err = -ENOENT;
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                             iph->daddr, iph->saddr, 0);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
@@ -272,6 +272,7 @@ static const struct net_device_ops ipip_netdev_ops = {
        .ndo_do_ioctl   = ipip_tunnel_ioctl,
        .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip_tunnel_get_iflink,
 };
 
 #define IPIP_FEATURES (NETIF_F_SG |            \
@@ -286,7 +287,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
 
        dev->type               = ARPHRD_TUNNEL;
        dev->flags              = IFF_NOARP;
-       dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
        netif_keep_dst(dev);
index b4a545d24adbf5c88b490c2bc7cccea3f901eb29..3a2c0162c3badeed716599e538ed06426ddc7199 100644 (file)
@@ -189,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
        }
 
        mrt = ipmr_get_table(rule->fr_net, rule->table);
-       if (mrt == NULL)
+       if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
@@ -253,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net)
        INIT_LIST_HEAD(&net->ipv4.mr_tables);
 
        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL) {
+       if (!mrt) {
                err = -ENOMEM;
                goto err1;
        }
@@ -266,7 +266,7 @@ static int __net_init ipmr_rules_init(struct net *net)
        return 0;
 
 err2:
-       kfree(mrt);
+       ipmr_free_table(mrt);
 err1:
        fib_rules_unregister(ops);
        return err;
@@ -276,11 +276,13 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
        struct mr_table *mrt, *next;
 
+       rtnl_lock();
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
+       rtnl_unlock();
 }
 #else
 #define ipmr_for_each_table(mrt, net) \
@@ -306,7 +308,10 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
+       rtnl_lock();
        ipmr_free_table(net->ipv4.mrt);
+       net->ipv4.mrt = NULL;
+       rtnl_unlock();
 }
 #endif
 
@@ -316,11 +321,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        unsigned int i;
 
        mrt = ipmr_get_table(net, id);
-       if (mrt != NULL)
+       if (mrt)
                return mrt;
 
        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
-       if (mrt == NULL)
+       if (!mrt)
                return NULL;
        write_pnet(&mrt->net, net);
        mrt->id = id;
@@ -422,7 +427,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
                        dev->flags |= IFF_MULTICAST;
 
                        in_dev = __in_dev_get_rtnl(dev);
-                       if (in_dev == NULL)
+                       if (!in_dev)
                                goto failure;
 
                        ipv4_devconf_setall(in_dev);
@@ -473,8 +478,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+static int reg_vif_get_iflink(const struct net_device *dev)
+{
+       return 0;
+}
+
 static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
+       .ndo_get_iflink = reg_vif_get_iflink,
 };
 
 static void reg_vif_setup(struct net_device *dev)
@@ -500,7 +511,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
 
        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 
-       if (dev == NULL)
+       if (!dev)
                return NULL;
 
        dev_net_set(dev, net);
@@ -509,7 +520,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
                free_netdev(dev);
                return NULL;
        }
-       dev->iflink = 0;
 
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
@@ -757,7 +767,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
-                       if (dev && __in_dev_get_rtnl(dev) == NULL) {
+                       if (dev && !__in_dev_get_rtnl(dev)) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
@@ -801,7 +811,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
-               v->link = dev->iflink;
+               v->link = dev_get_iflink(dev);
 
        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
@@ -1003,7 +1013,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
        rcu_read_lock();
        mroute_sk = rcu_dereference(mrt->mroute_sk);
-       if (mroute_sk == NULL) {
+       if (!mroute_sk) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -EINVAL;
@@ -1156,7 +1166,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                return -EINVAL;
 
        c = ipmr_cache_alloc();
-       if (c == NULL)
+       if (!c)
                return -ENOMEM;
 
        c->mfc_origin = mfc->mfcc_origin.s_addr;
@@ -1278,7 +1288,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
                return -EOPNOTSUPP;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        if (optname != MRT_INIT) {
@@ -1441,7 +1451,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
                return -EOPNOTSUPP;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        if (optname != MRT_VERSION &&
@@ -1487,7 +1497,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (cmd) {
@@ -1561,7 +1571,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (cmd) {
@@ -1669,7 +1679,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
        nf_reset(skb);
 }
 
-static inline int ipmr_forward_finish(struct sk_buff *skb)
+static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
 
@@ -1679,7 +1689,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       return dst_output(skb);
+       return dst_output_sk(sk, skb);
 }
 
 /*
@@ -1696,7 +1706,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
        struct flowi4 fl4;
        int    encap = 0;
 
-       if (vif->dev == NULL)
+       if (!vif->dev)
                goto out_free;
 
 #ifdef CONFIG_IP_PIMSM
@@ -1778,7 +1788,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
         * not mrouter) cannot join to more than one interface - it will
         * result in receiving multiple packets.
         */
-       NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
+       NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
+               skb->dev, dev,
                ipmr_forward_finish);
        return;
 
@@ -1987,7 +1998,7 @@ int ip_mr_input(struct sk_buff *skb)
 
        /* already under rcu_read_lock() */
        cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
-       if (cache == NULL) {
+       if (!cache) {
                int vif = ipmr_find_vif(mrt, skb->dev);
 
                if (vif >= 0)
@@ -1998,13 +2009,13 @@ int ip_mr_input(struct sk_buff *skb)
        /*
         *      No usable cache entry
         */
-       if (cache == NULL) {
+       if (!cache) {
                int vif;
 
                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        ip_local_deliver(skb);
-                       if (skb2 == NULL)
+                       if (!skb2)
                                return -ENOBUFS;
                        skb = skb2;
                }
@@ -2063,7 +2074,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
        read_unlock(&mrt_lock);
 
-       if (reg_dev == NULL)
+       if (!reg_dev)
                return 1;
 
        skb->mac_header = skb->network_header;
@@ -2193,18 +2204,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
        int err;
 
        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        rcu_read_lock();
        cache = ipmr_cache_find(mrt, saddr, daddr);
-       if (cache == NULL && skb->dev) {
+       if (!cache && skb->dev) {
                int vif = ipmr_find_vif(mrt, skb->dev);
 
                if (vif >= 0)
                        cache = ipmr_cache_find_any(mrt, daddr, vif);
        }
-       if (cache == NULL) {
+       if (!cache) {
                struct sk_buff *skb2;
                struct iphdr *iph;
                struct net_device *dev;
@@ -2262,7 +2273,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        int err;
 
        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -2327,7 +2338,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
 
        skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
                        GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
@@ -2442,7 +2453,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return ERR_PTR(-ENOENT);
 
        iter->mrt = mrt;
@@ -2561,7 +2572,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return ERR_PTR(-ENOENT);
 
        it->mrt = mrt;
index 7ebd6e37875cc95b08d294ff64306925d05e550e..65de0684e22a17862663096da407eee16bc33a31 100644 (file)
@@ -94,7 +94,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb,
 {
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);
 
                rt_info->tos = iph->tos;
@@ -109,7 +109,7 @@ static int nf_ip_reroute(struct sk_buff *skb,
 {
        const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);
 
                if (!(iph->tos == rt_info->tos &&
index f95b6f93814b95b2c810eff8d4573a996f9a9f63..13bfe84bf3ca5a6aafe6982b8782958b0cce529f 100644 (file)
@@ -248,8 +248,7 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
 
 unsigned int arpt_do_table(struct sk_buff *skb,
                           unsigned int hook,
-                          const struct net_device *in,
-                          const struct net_device *out,
+                          const struct nf_hook_state *state,
                           struct xt_table *table)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -265,8 +264,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return NF_DROP;
 
-       indev = in ? in->name : nulldevname;
-       outdev = out ? out->name : nulldevname;
+       indev = state->in ? state->in->name : nulldevname;
+       outdev = state->out ? state->out->name : nulldevname;
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
@@ -281,8 +280,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        e = get_entry(table_base, private->hook_entry[hook]);
        back = get_entry(table_base, private->underflow[hook]);
 
-       acpar.in      = in;
-       acpar.out     = out;
+       acpar.in      = state->in;
+       acpar.out     = state->out;
        acpar.hooknum = hook;
        acpar.family  = NFPROTO_ARP;
        acpar.hotdrop = false;
index 802ddecb30b8110474da0e0a34c134aceaece43b..93876d03120ca85fbc1e5aaa689d245d4508f01e 100644 (file)
@@ -28,12 +28,11 @@ static const struct xt_table packet_filter = {
 /* The work comes in here from netfilter.c */
 static unsigned int
 arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return arpt_do_table(skb, ops->hooknum, in, out,
+       return arpt_do_table(skb, ops->hooknum, state,
                             net->ipv4.arptable_filter);
 }
 
index cf5e82f39d3b87d7f8163320bffbc26af38d6f98..c69db7fa25ee6376ee3f2bee87d4ce7f09105fb3 100644 (file)
@@ -288,8 +288,7 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
 unsigned int
 ipt_do_table(struct sk_buff *skb,
             unsigned int hook,
-            const struct net_device *in,
-            const struct net_device *out,
+            const struct nf_hook_state *state,
             struct xt_table *table)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -306,8 +305,8 @@ ipt_do_table(struct sk_buff *skb,
 
        /* Initialization */
        ip = ip_hdr(skb);
-       indev = in ? in->name : nulldevname;
-       outdev = out ? out->name : nulldevname;
+       indev = state->in ? state->in->name : nulldevname;
+       outdev = state->out ? state->out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
@@ -317,8 +316,8 @@ ipt_do_table(struct sk_buff *skb,
        acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
        acpar.thoff   = ip_hdrlen(skb);
        acpar.hotdrop = false;
-       acpar.in      = in;
-       acpar.out     = out;
+       acpar.in      = state->in;
+       acpar.out     = state->out;
        acpar.family  = NFPROTO_IPV4;
        acpar.hooknum = hook;
 
@@ -370,7 +369,7 @@ ipt_do_table(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
-                       trace_packet(skb, hook, in, out,
+                       trace_packet(skb, hook, state->in, state->out,
                                     table->name, private, e);
 #endif
                /* Standard target? */
index f75e9df5e0179d3f8a40640dee61b355a3077784..771ab3d01ad3dc303ac999e539a9c8ab5845baf2 100644 (file)
@@ -504,14 +504,12 @@ static void arp_print(struct arp_payload *payload)
 static unsigned int
 arp_mangle(const struct nf_hook_ops *ops,
           struct sk_buff *skb,
-          const struct net_device *in,
-          const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
+          const struct nf_hook_state *state)
 {
        struct arphdr *arp = arp_hdr(skb);
        struct arp_payload *payload;
        struct clusterip_config *c;
-       struct net *net = dev_net(in ? in : out);
+       struct net *net = dev_net(state->in ? state->in : state->out);
 
        /* we don't care about non-ethernet and non-ipv4 ARP */
        if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -536,10 +534,10 @@ arp_mangle(const struct nf_hook_ops *ops,
         * addresses on different interfacs.  However, in the CLUSTERIP case
         * this wouldn't work, since we didn't subscribe the mcast group on
         * other interfaces */
-       if (c->dev != out) {
+       if (c->dev != state->out) {
                pr_debug("not mangling arp reply on different "
                         "interface: cip'%s'-skb'%s'\n",
-                        c->dev->name, out->name);
+                        c->dev->name, state->out->name);
                clusterip_config_put(c);
                return NF_ACCEPT;
        }
index a313c3fbeb469e0594b2f7bccd788d687184bc61..e9e67793055fce9b20ee836275a3eb4a9437592a 100644 (file)
@@ -300,11 +300,9 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 
 static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
+                                      const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+       struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
index e08a74a243a85d125ccbd043314f06c6b528b368..a0f3beca52d2107b12ae748a4328d5491e7553c3 100644 (file)
@@ -34,8 +34,7 @@ static const struct xt_table packet_filter = {
 
 static unsigned int
 iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                   const struct net_device *in, const struct net_device *out,
-                   int (*okfn)(struct sk_buff *))
+                   const struct nf_hook_state *state)
 {
        const struct net *net;
 
@@ -45,9 +44,8 @@ iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, ops->hooknum, in, out,
-                           net->ipv4.iptable_filter);
+       net = dev_net(state->in ? state->in : state->out);
+       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
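The conversions above collapse the separate in/out/okfn hook arguments into a single const struct nf_hook_state *, and ipt_do_table() now takes that state directly. A minimal sketch of a hook written against the new prototype, using only the members exercised in these hunks (example_hook is an invented name):

static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	/* one of state->in / state->out is set depending on the hook point */
	struct net *net = dev_net(state->in ? state->in : state->out);

	return ipt_do_table(skb, ops->hooknum, state,
			    net->ipv4.iptable_filter);
}
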
index 6a5079c34bb363c34135e9bed5700a645f15b249..62cbb8c5f4a8f246428599186e0e9b498ae460a6 100644 (file)
@@ -37,8 +37,9 @@ static const struct xt_table packet_mangler = {
 };
 
 static unsigned int
-ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
+ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 {
+       struct net_device *out = state->out;
        unsigned int ret;
        const struct iphdr *iph;
        u_int8_t tos;
@@ -58,7 +59,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
        daddr = iph->daddr;
        tos = iph->tos;
 
-       ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
+       ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state,
                           dev_net(out)->ipv4.iptable_mangle);
        /* Reroute for ANY change. */
        if (ret != NF_DROP && ret != NF_STOLEN) {
@@ -81,18 +82,16 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
 static unsigned int
 iptable_mangle_hook(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        if (ops->hooknum == NF_INET_LOCAL_OUT)
-               return ipt_mangle_out(skb, out);
+               return ipt_mangle_out(skb, state);
        if (ops->hooknum == NF_INET_POST_ROUTING)
-               return ipt_do_table(skb, ops->hooknum, in, out,
-                                   dev_net(out)->ipv4.iptable_mangle);
+               return ipt_do_table(skb, ops->hooknum, state,
+                                   dev_net(state->out)->ipv4.iptable_mangle);
        /* PREROUTING/INPUT/FORWARD: */
-       return ipt_do_table(skb, ops->hooknum, in, out,
-                           dev_net(in)->ipv4.iptable_mangle);
+       return ipt_do_table(skb, ops->hooknum, state,
+                           dev_net(state->in)->ipv4.iptable_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
index 6b67d7e9a75d69e95d9d25f9b8524bc58d9ef607..0d4d9cdf98a4c0dcb2da3fbebe9264efeb99d4c7 100644 (file)
@@ -30,49 +30,40 @@ static const struct xt_table nf_nat_ipv4_table = {
 
 static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
+                                        const struct nf_hook_state *state,
                                         struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
 
-       return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table);
+       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table);
 }
 
 static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_fn(ops, skb, state, iptable_nat_do_chain);
 }
 
 static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_in(ops, skb, state, iptable_nat_do_chain);
 }
 
 static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_out(ops, skb, state, iptable_nat_do_chain);
 }
 
 static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                                              struct sk_buff *skb,
-                                             const struct net_device *in,
-                                             const struct net_device *out,
-                                             int (*okfn)(struct sk_buff *))
+                                             const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_local_fn(ops, skb, state, iptable_nat_do_chain);
 }
 
 static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
index b2f7e8f98316d2733e3936ead8dec20da2978f73..0356e6da4bb749ba1dcfa07667dcac7b0aa92878 100644 (file)
@@ -21,8 +21,7 @@ static const struct xt_table packet_raw = {
 /* The work comes in here from netfilter.c. */
 static unsigned int
 iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                const struct net_device *in, const struct net_device *out,
-                int (*okfn)(struct sk_buff *))
+                const struct nf_hook_state *state)
 {
        const struct net *net;
 
@@ -32,8 +31,8 @@ iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
+       net = dev_net(state->in ? state->in : state->out);
+       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index c86647ed2078f660cf0e9f8b69957a6f4c79b1f6..4bce3980ccd935f891c55329c127478dc031ed77 100644 (file)
@@ -38,9 +38,7 @@ static const struct xt_table security_table = {
 
 static unsigned int
 iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        const struct net *net;
 
@@ -50,8 +48,8 @@ iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* Somebody is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, ops->hooknum, in, out,
+       net = dev_net(state->in ? state->in : state->out);
+       return ipt_do_table(skb, ops->hooknum, state,
                            net->ipv4.iptable_security);
 }
 
index 8c8d6642cbb0211f2a14252d95158233b485c256..30ad9554b5e9931ad37329f0ffda6a8aacdf55d1 100644 (file)
@@ -94,9 +94,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
 
 static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
-                               const struct net_device *in,
-                               const struct net_device *out,
-                               int (*okfn)(struct sk_buff *))
+                               const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -123,9 +121,7 @@ static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
 
 static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
-                                const struct net_device *in,
-                                const struct net_device *out,
-                                int (*okfn)(struct sk_buff *))
+                                const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -149,24 +145,20 @@ out:
 
 static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->in), PF_INET, ops->hooknum, skb);
 }
 
 static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
-       return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->out), PF_INET, ops->hooknum, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
index 7e5ca6f2d0cd57a7084cd9cd7386c9cdd40ac076..c88b7d4347187260e789612c44f34059092dc41d 100644 (file)
@@ -63,9 +63,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 
 static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(skb->sk);
index fc37711e11f38be40252086913ff5b913a40ed68..e59cc05c09e96c8f6996e5e0063c4d138d0dee11 100644 (file)
@@ -256,11 +256,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
 
 unsigned int
 nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
+                                       const struct nf_hook_state *state,
                                        struct nf_conn *ct))
 {
        struct nf_conn *ct;
@@ -309,7 +308,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = do_chain(ops, skb, in, out, ct);
+                       ret = do_chain(ops, skb, state, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
 
@@ -323,7 +322,8 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat,
+                                              state->out))
                                goto oif_changed;
                }
                break;
@@ -332,7 +332,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
                        goto oif_changed;
        }
 
@@ -346,17 +346,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
 
 unsigned int
 nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
+                                        const struct nf_hook_state *state,
                                         struct nf_conn *ct))
 {
        unsigned int ret;
        __be32 daddr = ip_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            daddr != ip_hdr(skb)->daddr)
                skb_dst_drop(skb);
@@ -367,11 +366,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
 
 unsigned int
 nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-               const struct net_device *in, const struct net_device *out,
+               const struct nf_hook_state *state,
                unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
+                                         const struct nf_hook_state *state,
                                          struct nf_conn *ct))
 {
 #ifdef CONFIG_XFRM
@@ -386,7 +384,7 @@ nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -410,11 +408,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
 
 unsigned int
 nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
+                    const struct nf_hook_state *state,
                     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                               struct sk_buff *skb,
-                                              const struct net_device *in,
-                                              const struct net_device *out,
+                                              const struct nf_hook_state *state,
                                               struct nf_conn *ct))
 {
        const struct nf_conn *ct;
@@ -427,7 +424,7 @@ nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
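The nf_nat_ipv4_* entry points above hand the whole nf_hook_state through to their do_chain callback as well. A rough sketch of a chain provider against the reworked prototypes, mirroring the iptable_nat conversion earlier in this diff (example_* names are invented):

static unsigned int example_nat_do_chain(const struct nf_hook_ops *ops,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state,
					 struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table);
}

static unsigned int example_nat_in(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	return nf_nat_ipv4_in(ops, skb, state, example_nat_do_chain);
}
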
index c5b794da51a91fd8b2236379b532910cafc0568f..3262e41ff76f38a89db3fd7da8c771a51b273abf 100644 (file)
@@ -13,6 +13,7 @@
 #include <net/dst.h>
 #include <net/netfilter/ipv4/nf_reject.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_bridge.h>
 #include <net/netfilter/ipv4/nf_reject.h>
 
 const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
@@ -146,7 +147,8 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
         */
        if (oldskb->nf_bridge) {
                struct ethhdr *oeth = eth_hdr(oldskb);
-               nskb->dev = oldskb->nf_bridge->physindev;
+
+               nskb->dev = nf_bridge_get_physindev(oldskb);
                niph->tot_len = htons(nskb->len);
                ip_send_check(niph);
                if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
index 19412a4063fbe27de956f4d7c380b8e774d61aae..8412268bbad1852851c6fc3d5337d463d1ff1c51 100644 (file)
 static unsigned int
 nft_do_chain_arp(const struct nf_hook_ops *ops,
                  struct sk_buff *skb,
-                 const struct net_device *in,
-                 const struct net_device *out,
-                 int (*okfn)(struct sk_buff *))
+                 const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo(&pkt, ops, skb, in, out);
+       nft_set_pktinfo(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
index 6820c8c40842170039cfad8645ca72d88508daa1..aa180d3a69a5a196e65fdc46cad267944ef6c5cc 100644 (file)
 
 static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
 
 static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
        if (unlikely(skb->len < sizeof(struct iphdr) ||
                     ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
@@ -45,7 +41,7 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
                return NF_ACCEPT;
        }
 
-       return nft_do_chain_ipv4(ops, skb, in, out, okfn);
+       return nft_do_chain_ipv4(ops, skb, state);
 }
 
 struct nft_af_info nft_af_ipv4 __read_mostly = {
index df547bf50078c4016c0e1924da77315cb1e6127a..bf5c30ae14e4e768b61dd758be2acef6e8a0aa86 100644 (file)
 
 static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
+                                     const struct nf_hook_state *state,
                                      struct nf_conn *ct)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
 
 static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_in(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_out(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_local_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static const struct nf_chain_type nft_chain_nat_ipv4 = {
index 125b66766c0a8799f0137b12352ab2448a0f9979..e335b0afdaf33405f05804e72bcadf48cac5c3ea 100644 (file)
@@ -23,9 +23,7 @@
 
 static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        unsigned int ret;
        struct nft_pktinfo pkt;
@@ -39,7 +37,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
 
        mark = skb->mark;
        iph = ip_hdr(skb);
index 16a5d4d73d7565807490074d9f4df6f0ab90b297..a7621faa96783f31cf178a36a8ac147bedac5f32 100644 (file)
@@ -33,6 +33,8 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr,
        case NFT_REJECT_TCP_RST:
                nf_send_reset(pkt->skb, pkt->ops->hooknum);
                break;
+       default:
+               break;
        }
 
        data[NFT_REG_VERDICT].verdict = NF_DROP;
index 344e7cdfb8d40a91b3d8fd0eff0da96a14370922..a93f260cf24ca0a9d60346dc085eb51afdb43927 100644 (file)
@@ -516,7 +516,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
                 ntohs(icmph->un.echo.sequence));
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-       if (sk == NULL) {
+       if (!sk) {
                pr_debug("no socket, dropping\n");
                return; /* No socket for error */
        }
@@ -971,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb)
        skb_push(skb, skb->data - (u8 *)icmph);
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-       if (sk != NULL) {
+       if (sk) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                pr_debug("rcv on socket %p\n", sk);
index 56946f47d44673c9f93ab4d5d04b0811b8491c8a..c0bb648fb2f98dc5804e413b211276033465a872 100644 (file)
@@ -293,7 +293,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 
        read_lock(&raw_v4_hashinfo.lock);
        raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-       if (raw_sk != NULL) {
+       if (raw_sk) {
                iph = (const struct iphdr *)skb->data;
                net = dev_net(skb->dev);
 
@@ -363,7 +363,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
        skb = sock_alloc_send_skb(sk,
                                  length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
-       if (skb == NULL)
+       if (!skb)
                goto error;
        skb_reserve(skb, hlen);
 
@@ -412,8 +412,8 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);
 
-       err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
-                     rt->dst.dev, dst_output);
+       err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
+                     NULL, rt->dst.dev, dst_output_sk);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
@@ -872,7 +872,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
-               if (skb != NULL)
+               if (skb)
                        amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
index 652b92ebd7badd3433282b96122da685d3f24eb1..a78540f28276771e4c8f35024d3ee133c31317ab 100644 (file)
@@ -1056,7 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
        rt = (struct rtable *)odst;
-       if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+       if (odst->obsolete && !odst->ops->check(odst, 0)) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;
@@ -1450,7 +1450,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        /* Primary sanity checks. */
 
-       if (in_dev == NULL)
+       if (!in_dev)
                return -EINVAL;
 
        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
@@ -1553,7 +1553,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
-       if (out_dev == NULL) {
+       if (!out_dev) {
                net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
                return -EINVAL;
        }
@@ -1591,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
        fnhe = find_exception(&FIB_RES_NH(*res), daddr);
        if (do_cache) {
-               if (fnhe != NULL)
+               if (fnhe)
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
                else
                        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
@@ -2054,7 +2054,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                     ipv4_is_lbcast(fl4->daddr))) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
                        dev_out = __ip_dev_find(net, fl4->saddr, false);
-                       if (dev_out == NULL)
+                       if (!dev_out)
                                goto out;
 
                        /* Special hack: user can direct multicasts
@@ -2087,7 +2087,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
        if (fl4->flowi4_oif) {
                dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
                rth = ERR_PTR(-ENODEV);
-               if (dev_out == NULL)
+               if (!dev_out)
                        goto out;
 
                /* RACE: Check return value of inet_select_addr instead. */
@@ -2299,7 +2299,7 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        u32 metrics[RTAX_MAX];
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        r = nlmsg_data(nlh);
@@ -2421,7 +2421,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        rtm = nlmsg_data(nlh);
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (skb == NULL) {
+       if (!skb) {
                err = -ENOBUFS;
                goto errout;
        }
@@ -2452,7 +2452,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
                struct net_device *dev;
 
                dev = __dev_get_by_index(net, iif);
-               if (dev == NULL) {
+               if (!dev) {
                        err = -ENODEV;
                        goto errout_free;
                }
@@ -2651,7 +2651,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
        tbl = ipv4_route_flush_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
-               if (tbl == NULL)
+               if (!tbl)
                        goto err_dup;
 
                /* Don't export sysctls to unprivileged users */
@@ -2661,7 +2661,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
        tbl[0].extra1 = net;
 
        net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
-       if (net->ipv4.route_hdr == NULL)
+       if (!net->ipv4.route_hdr)
                goto err_reg;
        return 0;
 
index fdf899163d4412af8bc1df82de74d410de2f7c15..c3852a7ff3c7630f4114cbc33a51a35fa3645e8c 100644 (file)
@@ -909,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                int i;
 
                table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
-               if (table == NULL)
+               if (!table)
                        goto err_alloc;
 
                /* Update the variables to point into the current struct net */
@@ -918,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        }
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
-       if (net->ipv4.ipv4_hdr == NULL)
+       if (!net->ipv4.ipv4_hdr)
                goto err_reg;
 
        net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
@@ -956,7 +956,7 @@ static __init int sysctl_ipv4_init(void)
        struct ctl_table_header *hdr;
 
        hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
-       if (hdr == NULL)
+       if (!hdr)
                return -ENOMEM;
 
        if (register_pernet_subsys(&ipv4_sysctl_ops)) {
index dbd51cefaf023a452894055d1e50d78218aedee9..094a6822c71d8cc69b1be28a9c6bb511f8f8b87b 100644 (file)
@@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Connected or passive Fast Open socket? */
        if (sk->sk_state != TCP_SYN_SENT &&
-           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
                if (tp->urg_seq == tp->copied_seq &&
@@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg)
 
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
-       if (tp->fastopen_req != NULL) {
+       if (tp->fastopen_req) {
                kfree(tp->fastopen_req);
                tp->fastopen_req = NULL;
        }
@@ -1042,12 +1042,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 
        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
                return -EOPNOTSUPP;
-       if (tp->fastopen_req != NULL)
+       if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */
 
        tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
                                   sk->sk_allocation);
-       if (unlikely(tp->fastopen_req == NULL))
+       if (unlikely(!tp->fastopen_req))
                return -ENOBUFS;
        tp->fastopen_req->data = msg;
        tp->fastopen_req->size = size;
@@ -2138,7 +2138,7 @@ adjudge_to_death:
                 * aborted (e.g., closed with unread data) before 3WHS
                 * finishes.
                 */
-               if (req != NULL)
+               if (req)
                        reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
        }
@@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                break;
 
        case TCP_FASTOPEN:
-               if (icsk->icsk_accept_queue.fastopenq != NULL)
+               if (icsk->icsk_accept_queue.fastopenq)
                        val = icsk->icsk_accept_queue.fastopenq->max_qlen;
                else
                        val = 0;
@@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk)
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
-       if (req != NULL)
+       if (req)
                reqsk_fastopen_remove(sk, req, false);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
index 86dc119a38156b794c10ef5084ebb92b4ccde152..79b34a0f4a4ae519c3f66c511c989b92fca02b09 100644 (file)
@@ -29,7 +29,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
                r->idiag_wqueue = tp->write_seq - tp->snd_una;
        }
-       if (info != NULL)
+       if (info)
                tcp_get_info(sk, info);
 }
 
index 2eb887ec0ce3ba9b69b58b7a4681172280f5896e..e3d87aca6be8fafe02bec5a8f862a88a6fe79d50 100644 (file)
@@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        req->sk = NULL;
 
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL)
+       if (!child)
                return false;
 
        spin_lock(&queue->fastopenq->lock);
@@ -214,7 +214,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        sk->sk_data_ready(sk);
        bh_unlock_sock(child);
        sock_put(child);
-       WARN_ON(req->sk == NULL);
+       WARN_ON(!req->sk);
        return true;
 }
 
@@ -233,7 +233,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-       if (fastopenq == NULL || fastopenq->max_qlen == 0)
+       if (!fastopenq || fastopenq->max_qlen == 0)
                return false;
 
        if (fastopenq->qlen >= fastopenq->max_qlen) {
@@ -303,6 +303,7 @@ fastopen:
        } else if (foc->len > 0) /* Client presents an invalid cookie */
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
+       valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return false;
 }
index 18b80e8bc5336564560b7897a939bbbb2d83e5ed..031cf72cd05c8094de8a9a76cd4cffa13e45c0d5 100644 (file)
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if ((tp->retransmit_skb_hint == NULL) ||
+       if (!tp->retransmit_skb_hint ||
            before(TCP_SKB_CB(skb)->seq,
                   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                tp->retransmit_skb_hint = skb;
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                fack_count += pcount;
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+               if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
                    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                        tp->lost_cnt_hint += pcount;
 
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                        break;
 
-               if ((next_dup != NULL) &&
+               if (next_dup &&
                    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
                        in_sack = tcp_match_skb_to_sack(sk, skb,
                                                        next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (in_sack <= 0) {
                        tmp = tcp_shift_skb_data(sk, skb, state,
                                                 start_seq, end_seq, dup_sack);
-                       if (tmp != NULL) {
+                       if (tmp) {
                                if (tmp != skb) {
                                        skb = tmp;
                                        continue;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                                                struct tcp_sacktag_state *state,
                                                u32 skip_to_seq)
 {
-       if (next_dup == NULL)
+       if (!next_dup)
                return skb;
 
        if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        if (tcp_highest_sack_seq(tp) == cache->end_seq) {
                                /* ...but better entrypoint exists! */
                                skb = tcp_highest_sack(sk);
-                               if (skb == NULL)
+                               if (!skb)
                                        break;
                                state.fack_count = tp->fackets_out;
                                cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
                if (!before(start_seq, tcp_highest_sack_seq(tp))) {
                        skb = tcp_highest_sack(sk);
-                       if (skb == NULL)
+                       if (!skb)
                                break;
                        state.fack_count = tp->fackets_out;
                }
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                        if (!first_ackt.v64)
                                first_ackt = last_ackt;
 
-                       if (!(sacked & TCPCB_SACKED_ACKED))
+                       if (!(sacked & TCPCB_SACKED_ACKED)) {
                                reord = min(pkts_acked, reord);
-                       if (!after(scb->end_seq, tp->high_seq))
-                               flag |= FLAG_ORIG_SACK_ACKED;
+                               if (!after(scb->end_seq, tp->high_seq))
+                                       flag |= FLAG_ORIG_SACK_ACKED;
+                       }
                }
 
                if (sacked & TCPCB_SACKED_ACKED)
@@ -3602,6 +3603,23 @@ old_ack:
        return 0;
 }
 
+static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
+                                     bool syn, struct tcp_fastopen_cookie *foc,
+                                     bool exp_opt)
+{
+       /* Valid only in SYN or SYN-ACK with an even length.  */
+       if (!foc || !syn || len < 0 || (len & 1))
+               return;
+
+       if (len >= TCP_FASTOPEN_COOKIE_MIN &&
+           len <= TCP_FASTOPEN_COOKIE_MAX)
+               memcpy(foc->val, cookie, len);
+       else if (len != 0)
+               len = -1;
+       foc->len = len;
+       foc->exp = exp_opt;
+}
+
 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
@@ -3691,21 +3709,22 @@ void tcp_parse_options(const struct sk_buff *skb,
                                 */
                                break;
 #endif
+                       case TCPOPT_FASTOPEN:
+                               tcp_parse_fastopen_option(
+                                       opsize - TCPOLEN_FASTOPEN_BASE,
+                                       ptr, th->syn, foc, false);
+                               break;
+
                        case TCPOPT_EXP:
                                /* Fast Open option shares code 254 using a
-                                * 16 bits magic number. It's valid only in
-                                * SYN or SYN-ACK with an even size.
+                                * 16-bit magic number.
                                 */
-                               if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
-                                   get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
-                                   foc == NULL || !th->syn || (opsize & 1))
-                                       break;
-                               foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
-                               if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
-                                   foc->len <= TCP_FASTOPEN_COOKIE_MAX)
-                                       memcpy(foc->val, ptr + 2, foc->len);
-                               else if (foc->len != 0)
-                                       foc->len = -1;
+                               if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
+                                   get_unaligned_be16(ptr) ==
+                                   TCPOPT_FASTOPEN_MAGIC)
+                                       tcp_parse_fastopen_option(opsize -
+                                               TCPOLEN_EXP_FASTOPEN_BASE,
+                                               ptr + 2, th->syn, foc, true);
                                break;
 
                        }
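
The new tcp_parse_fastopen_option() helper above lets one code path accept both encodings of the Fast Open cookie: the RFC 7413 option (kind 34: kind, length, cookie) and the older experimental option (kind 254: kind, length, 16-bit magic 0xF989, cookie), with foc->exp recording which form arrived. A small self-contained sketch of the two wire layouts and the shared length check — the constants and struct here are illustrative stand-ins for the kernel's, and the examples assume an 8-byte cookie:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative constants matching the values the patch relies on:
 * RFC 7413 option kind 34, experimental kind 254 with magic 0xF989,
 * cookie length between 4 and 16 bytes and always even. */
#define OPT_FASTOPEN    34
#define OPT_EXP         254
#define COOKIE_MIN      4
#define COOKIE_MAX      16

struct foc {                    /* stand-in for struct tcp_fastopen_cookie */
        int8_t  len;            /* -1 invalid, 0 cookie request, >0 cookie bytes */
        uint8_t exp;            /* 1 if the experimental encoding was seen */
        uint8_t val[COOKIE_MAX];
};

/* Mirror of the shared helper: 'len' is the cookie length left after the
 * per-encoding header (2 bytes for kind 34, 4 bytes for kind 254 + magic). */
static void parse_fastopen(int len, const uint8_t *cookie, int syn,
                           struct foc *foc, int exp_opt)
{
        if (!foc || !syn || len < 0 || (len & 1))
                return;                 /* only in SYN/SYN-ACK, even length */
        if (len >= COOKIE_MIN && len <= COOKIE_MAX)
                memcpy(foc->val, cookie, len);
        else if (len != 0)
                len = -1;               /* option present but malformed */
        foc->len = (int8_t)len;
        foc->exp = (uint8_t)exp_opt;
}

int main(void)
{
        /* kind 34, length 10, 8-byte cookie (RFC 7413 layout) */
        uint8_t rfc[] = { OPT_FASTOPEN, 10, 1, 2, 3, 4, 5, 6, 7, 8 };
        /* kind 254, length 12, magic 0xF9 0x89, 8-byte cookie (experimental layout) */
        uint8_t exp[] = { OPT_EXP, 12, 0xF9, 0x89, 1, 2, 3, 4, 5, 6, 7, 8 };
        struct foc a = { 0 }, b = { 0 };

        parse_fastopen(rfc[1] - 2, rfc + 2, 1, &a, 0);
        parse_fastopen(exp[1] - 4, exp + 4, 1, &b, 1);
        printf("rfc: len=%d exp=%d; exp-opt: len=%d exp=%d\n",
               a.len, a.exp, b.len, b.exp);
        return 0;
}
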
@@ -4669,7 +4688,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        struct sk_buff *head;
        u32 start, end;
 
-       if (skb == NULL)
+       if (!skb)
                return;
 
        start = TCP_SKB_CB(skb)->seq;
@@ -5124,7 +5143,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (unlikely(sk->sk_rx_dst == NULL))
+       if (unlikely(!sk->sk_rx_dst))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
        /*
         *      Header prediction.
@@ -5321,7 +5340,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
        tcp_set_state(sk, TCP_ESTABLISHED);
 
-       if (skb != NULL) {
+       if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
                security_inet_conn_established(sk, skb);
        }
@@ -5359,8 +5378,8 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
-       u16 mss = tp->rx_opt.mss_clamp;
-       bool syn_drop;
+       u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
+       bool syn_drop = false;
 
        if (mss == tp->rx_opt.user_mss) {
                struct tcp_options_received opt;
@@ -5372,16 +5391,25 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
                mss = opt.mss_clamp;
        }
 
-       if (!tp->syn_fastopen)  /* Ignore an unsolicited cookie */
+       if (!tp->syn_fastopen) {
+               /* Ignore an unsolicited cookie */
                cookie->len = -1;
+       } else if (tp->total_retrans) {
+               /* SYN timed out and the SYN-ACK neither has a cookie nor
+                * acknowledges data. Presumably the remote received only
+                * the retransmitted (regular) SYNs: either the original
+                * SYN-data or the corresponding SYN-ACK was dropped.
+                */
+               syn_drop = (cookie->len < 0 && data);
+       } else if (cookie->len < 0 && !tp->syn_data) {
+               /* We requested a cookie but didn't get it. If we did not use
+                * the (old) exp opt format then try so next time (try_exp=1).
+                * Otherwise we go back to use the RFC7413 opt (try_exp=2).
+                */
+               try_exp = tp->syn_fastopen_exp ? 2 : 1;
+       }
 
-       /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
-        * the remote receives only the retransmitted (regular) SYNs: either
-        * the original SYN-data or the corresponding SYN-ACK is lost.
-        */
-       syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
-
-       tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+       tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
 
        if (data) { /* Retransmit unacked data in SYN */
                tcp_for_write_queue_from(data, sk) {
@@ -5690,11 +5718,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        req = tp->fastopen_rsk;
-       if (req != NULL) {
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                    sk->sk_state != TCP_FIN_WAIT1);
 
-               if (tcp_check_req(sk, skb, req, true) == NULL)
+               if (!tcp_check_req(sk, skb, req, true))
                        goto discard;
        }
 
@@ -5780,7 +5808,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 * ACK we have received, this would have acknowledged
                 * our SYNACK so stop the SYNACK timer.
                 */
-               if (req != NULL) {
+               if (req) {
                        /* Return RST if ack_seq is invalid.
                         * Note that RFC793 only says to generate a
                         * DUPACK for it but for TCP Fast Open it seems
index 5aababa20a212068d7ef5acf74c85ddb3d99f61f..37578d52897e58942b6e6b8ef4e8db6f0f245b85 100644 (file)
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
-           (twp == NULL || (sysctl_tcp_tw_reuse &&
+           (!twp || (sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
@@ -494,7 +494,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
-               if (fastopen && fastopen->sk == NULL)
+               if (fastopen && !fastopen->sk)
                        break;
 
                if (!sock_owned_by_user(sk)) {
@@ -897,9 +897,9 @@ EXPORT_SYMBOL(tcp_md5_do_lookup);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                         const struct sock *addr_sk)
 {
-       union tcp_md5_addr *addr;
+       const union tcp_md5_addr *addr;
 
-       addr = (union tcp_md5_addr *)&sk->sk_daddr;
+       addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 EXPORT_SYMBOL(tcp_v4_md5_lookup);
@@ -1305,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
-       if (key != NULL) {
+       if (key) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
@@ -1390,7 +1390,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
-                           dst->ops->check(dst, 0) == NULL) {
+                           !dst->ops->check(dst, 0)) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
@@ -1469,7 +1469,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk_fullsock(sk)) {
-                       struct dst_entry *dst = sk->sk_rx_dst;
+                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, 0);
@@ -1797,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       BUG_ON(tp->fastopen_rsk != NULL);
+       BUG_ON(tp->fastopen_rsk);
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
index 71ec14c87579337a2cb9dc4752ebe37c189e1e20..a51d63a43e33af5fc751e4f0f3369b9394776975 100644 (file)
@@ -28,7 +28,8 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s
 
 struct tcp_fastopen_metrics {
        u16     mss;
-       u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
+       u16     syn_loss:10,            /* Recurring Fast Open SYN losses */
+               try_exp:2;              /* Request w/ exp. option (once) */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
 };
@@ -131,6 +132,8 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
+               tm->tcpm_fastopen.try_exp = 0;
+               tm->tcpm_fastopen.cookie.exp = false;
                tm->tcpm_fastopen.cookie.len = 0;
        }
 }
@@ -505,7 +508,7 @@ void tcp_init_metrics(struct sock *sk)
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */
 
-       if (dst == NULL)
+       if (!dst)
                goto reset;
 
        dst_confirm(dst);
@@ -713,6 +716,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
+                       if (cookie->len <= 0 && tfom->try_exp == 1)
+                               cookie->exp = true;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
@@ -721,7 +726,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 }
 
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-                           struct tcp_fastopen_cookie *cookie, bool syn_lost)
+                           struct tcp_fastopen_cookie *cookie, bool syn_lost,
+                           u16 try_exp)
 {
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_metrics_block *tm;
@@ -738,6 +744,9 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                        tfom->mss = mss;
                if (cookie && cookie->len > 0)
                        tfom->cookie = *cookie;
+               else if (try_exp > tfom->try_exp &&
+                        tfom->cookie.len <= 0 && !tfom->cookie.exp)
+                       tfom->try_exp = try_exp;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
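
The two hunks above add a small per-destination hint alongside the cached cookie: when a cookie request went unanswered, try_exp records which encoding to use on the next attempt — 1 means switch to the experimental option, 2 means go back to the RFC 7413 option — and tcp_fastopen_cache_get() turns only the value 1 into cookie->exp = true. A rough sketch of that state, with stand-in names rather than the kernel's:

#include <stdio.h>

/* Stand-in for the new two-bit tcpm_fastopen.try_exp hint. */
enum try_exp {
        TRY_NONE = 0,           /* no preference recorded */
        TRY_EXP_OPT = 1,        /* request the cookie with the experimental option next */
        TRY_RFC_OPT = 2,        /* go back to the RFC 7413 option next */
};

/* When a cookie request got no answer, remember the other encoding;
 * mirrors try_exp = tp->syn_fastopen_exp ? 2 : 1 in the patch. */
static enum try_exp next_hint(int used_exp_option)
{
        return used_exp_option ? TRY_RFC_OPT : TRY_EXP_OPT;
}

/* On the next connect only TRY_EXP_OPT flips the request to the experimental
 * format (cookie->exp = true in tcp_fastopen_cache_get()). */
static int request_uses_exp(enum try_exp hint)
{
        return hint == TRY_EXP_OPT;
}

int main(void)
{
        printf("no cookie via RFC opt -> use exp next: %d\n",
               request_uses_exp(next_hint(0)));
        printf("no cookie via exp opt -> use exp next: %d\n",
               request_uses_exp(next_hint(1)));
        return 0;
}
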
index 274e96fb369b99ece9727db859503a2384fd5ad6..2088fdcca14140f23aa01f60a1675c116f734a57 100644 (file)
@@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);
 
-       if (tw != NULL) {
+       if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        struct tcp_md5sig_key *key;
                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
-                       if (key != NULL) {
+                       if (key) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
                                        BUG();
@@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 {
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
-       if (newsk != NULL) {
+       if (newsk) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -628,10 +628,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&
 
-                   !inet_rtx_syn_ack(sk, req))
-                       mod_timer_pending(&req->rsk_timer, jiffies +
-                               min(TCP_TIMEOUT_INIT << req->num_timeout,
-                                   TCP_RTO_MAX));
+                   !inet_rtx_syn_ack(sk, req)) {
+                       unsigned long expires = jiffies;
+
+                       expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
+                                      TCP_RTO_MAX);
+                       if (!fastopen)
+                               mod_timer_pending(&req->rsk_timer, expires);
+                       else
+                               req->rsk_timer.expires = expires;
+               }
                return NULL;
        }
 
@@ -763,7 +769,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         * socket is created, wait for troubles.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL)
+       if (!child)
                goto listen_overflow;
 
        inet_csk_reqsk_queue_unlink(sk, req);
index 2e69b8d16e6827ae3e9aa620a9500e495f9af191..e662d85d1635d0269b669bb0f726760be3bae0d2 100644 (file)
@@ -518,17 +518,26 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 
        if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
                struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
+               u8 *p = (u8 *)ptr;
+               u32 len; /* Fast Open option length */
+
+               if (foc->exp) {
+                       len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+                       *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
+                                    TCPOPT_FASTOPEN_MAGIC);
+                       p += TCPOLEN_EXP_FASTOPEN_BASE;
+               } else {
+                       len = TCPOLEN_FASTOPEN_BASE + foc->len;
+                       *p++ = TCPOPT_FASTOPEN;
+                       *p++ = len;
+               }
 
-               *ptr++ = htonl((TCPOPT_EXP << 24) |
-                              ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
-                              TCPOPT_FASTOPEN_MAGIC);
-
-               memcpy(ptr, foc->val, foc->len);
-               if ((foc->len & 3) == 2) {
-                       u8 *align = ((u8 *)ptr) + foc->len;
-                       align[0] = align[1] = TCPOPT_NOP;
+               memcpy(p, foc->val, foc->len);
+               if ((len & 3) == 2) {
+                       p[foc->len] = TCPOPT_NOP;
+                       p[foc->len + 1] = TCPOPT_NOP;
                }
-               ptr += (foc->len + 3) >> 2;
+               ptr += (len + 3) >> 2;
        }
 }
 
@@ -565,7 +574,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        opts->mss = tcp_advertise_mss(sk);
        remaining -= TCPOLEN_MSS_ALIGNED;
 
-       if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
+       if (likely(sysctl_tcp_timestamps && !*md5)) {
                opts->options |= OPTION_TS;
                opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
                opts->tsecr = tp->rx_opt.ts_recent;
@@ -583,13 +592,17 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        }
 
        if (fastopen && fastopen->cookie.len >= 0) {
-               u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
+               u32 need = fastopen->cookie.len;
+
+               need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
+                                              TCPOLEN_FASTOPEN_BASE;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
                        opts->options |= OPTION_FAST_OPEN_COOKIE;
                        opts->fastopen_cookie = &fastopen->cookie;
                        remaining -= need;
                        tp->syn_fastopen = 1;
+                       tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
                }
        }
 
@@ -642,7 +655,10 @@ static unsigned int tcp_synack_options(struct sock *sk,
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
        if (foc != NULL && foc->len >= 0) {
-               u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+               u32 need = foc->len;
+
+               need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
+                                  TCPOLEN_FASTOPEN_BASE;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
                        opts->options |= OPTION_FAST_OPEN_COOKIE;
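
Both option-sizing hunks above compute the space the chosen encoding needs and round it up to a 32-bit boundary; tcp_options_write() then fills any gap with NOPs. For example, with an 8-byte cookie the RFC 7413 form needs 2 + 8 = 10 bytes, padded to 12, while the experimental form needs 4 + 8 = 12 bytes exactly. A tiny sketch of that arithmetic (the function name and header sizes here are illustrative):

#include <stdio.h>

/* Illustrative header sizes: kind + length (2 bytes) for the RFC 7413 form,
 * kind + length + 16-bit magic (4 bytes) for the experimental form. */
static unsigned int foc_option_space(unsigned int cookie_len, int exp)
{
        unsigned int need = cookie_len + (exp ? 4u : 2u);

        return (need + 3) & ~3u;        /* round up to a 32-bit multiple; NOPs pad */
}

int main(void)
{
        printf("8-byte cookie: rfc form %u bytes, exp form %u bytes\n",
               foc_option_space(8, 0), foc_option_space(8, 1));
        return 0;
}
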
@@ -1148,7 +1164,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 
        /* Get a new skb... force flag on. */
        buff = sk_stream_alloc_skb(sk, nsize, gfp);
-       if (buff == NULL)
+       if (!buff)
                return -ENOMEM; /* We'll just try again later. */
 
        sk->sk_wmem_queued += buff->truesize;
@@ -1707,7 +1723,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
                return tcp_fragment(sk, skb, len, mss_now, gfp);
 
        buff = sk_stream_alloc_skb(sk, 0, gfp);
-       if (unlikely(buff == NULL))
+       if (unlikely(!buff))
                return -ENOMEM;
 
        sk->sk_wmem_queued += buff->truesize;
@@ -1925,7 +1941,8 @@ static int tcp_mtu_probe(struct sock *sk)
        }
 
        /* We're allowed to probe.  Build it now. */
-       if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
+       nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+       if (!nskb)
                return -1;
        sk->sk_wmem_queued += nskb->truesize;
        sk_mem_charge(sk, nskb->truesize);
@@ -2223,7 +2240,7 @@ void tcp_send_loss_probe(struct sock *sk)
        int mss = tcp_current_mss(sk);
        int err = -1;
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
                goto rearm_timer;
        }
@@ -2733,7 +2750,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                if (skb == tcp_send_head(sk))
                        break;
                /* we could do better than to assign each time */
-               if (hole == NULL)
+               if (!hole)
                        tp->retransmit_skb_hint = skb;
 
                /* Assume this retransmit will generate
@@ -2757,7 +2774,7 @@ begin_fwd:
                        if (!tcp_can_forward_retransmit(sk))
                                break;
                        /* Backtrack if necessary to non-L'ed skb */
-                       if (hole != NULL) {
+                       if (hole) {
                                skb = hole;
                                hole = NULL;
                        }
@@ -2765,7 +2782,7 @@ begin_fwd:
                        goto begin_fwd;
 
                } else if (!(sacked & TCPCB_LOST)) {
-                       if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
+                       if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
                                hole = skb;
                        continue;
 
@@ -2810,7 +2827,7 @@ void tcp_send_fin(struct sock *sk)
         */
        mss_now = tcp_current_mss(sk);
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(skb)->end_seq++;
                tp->write_seq++;
@@ -2868,14 +2885,14 @@ int tcp_send_synack(struct sock *sk)
        struct sk_buff *skb;
 
        skb = tcp_write_queue_head(sk);
-       if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+       if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
                pr_debug("%s: wrong queue state\n", __func__);
                return -EFAULT;
        }
        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
                if (skb_cloned(skb)) {
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-                       if (nskb == NULL)
+                       if (!nskb)
                                return -ENOMEM;
                        tcp_unlink_write_queue(skb, sk);
                        __skb_header_release(nskb);
@@ -3014,7 +3031,7 @@ static void tcp_connect_init(struct sock *sk)
                (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-       if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+       if (tp->af_specific->md5_lookup(sk, sk))
                tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
@@ -3300,7 +3317,7 @@ void tcp_send_ack(struct sock *sk)
         * sock.
         */
        buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
-       if (buff == NULL) {
+       if (!buff) {
                inet_csk_schedule_ack(sk);
                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -3344,7 +3361,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 
        /* We don't queue it, tcp_transmit_skb() sets ownership. */
        skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
-       if (skb == NULL)
+       if (!skb)
                return -1;
 
        /* Reserve space for headers and set control bits. */
@@ -3375,8 +3392,8 @@ int tcp_write_wakeup(struct sock *sk)
        if (sk->sk_state == TCP_CLOSE)
                return -1;
 
-       if ((skb = tcp_send_head(sk)) != NULL &&
-           before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+       skb = tcp_send_head(sk);
+       if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
                int err;
                unsigned int mss = tcp_current_mss(sk);
                unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
index 2568fd282873b7436ca2299e20c283e1affd8688..8c65dc147d8bcfb58e14c20b774711ffbcc30d5a 100644 (file)
@@ -167,7 +167,7 @@ static int tcp_write_timeout(struct sock *sk)
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
                        if (tp->syn_fastopen || tp->syn_data)
-                               tcp_fastopen_cache_set(sk, 0, NULL, true);
+                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
                        if (tp->syn_data)
                                NET_INC_STATS_BH(sock_net(sk),
                                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
index 294af16633af63d87f5dfd2b1e28b15ce799fa61..d10b7e0112ebdb8fa61c650725ae7fae68f7e669 100644 (file)
@@ -433,7 +433,6 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
                              udp_ehash_secret + net_hash_mix(net));
 }
 
-
 /* called with read_rcu_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
                __be32 saddr, __be16 sport,
@@ -633,7 +632,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 
        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                        iph->saddr, uh->source, skb->dev->ifindex, udptable);
-       if (sk == NULL) {
+       if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return; /* No socket for error */
        }
@@ -1011,7 +1010,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (connected)
                rt = (struct rtable *)sk_dst_check(sk, 0);
 
-       if (rt == NULL) {
+       if (!rt) {
                struct net *net = sock_net(sk);
 
                fl4 = &fl4_stack;
@@ -1171,7 +1170,6 @@ out:
        return ret;
 }
 
-
 /**
  *     first_packet_length     - return length of first packet in receive queue
  *     @sk: socket
@@ -1355,7 +1353,6 @@ csum_copy_err:
        goto try_again;
 }
 
-
 int udp_disconnect(struct sock *sk, int flags)
 {
        struct inet_sock *inet = inet_sk(sk);
@@ -1522,7 +1519,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                /* if we're overly short, let UDP handle it */
                encap_rcv = ACCESS_ONCE(up->encap_rcv);
-               if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+               if (skb->len > sizeof(struct udphdr) && encap_rcv) {
                        int ret;
 
                        /* Verify checksum before giving to encap */
@@ -1579,7 +1576,6 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
            udp_lib_checksum_complete(skb))
                goto csum_error;
 
-
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
                UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                 is_udplite);
@@ -1609,7 +1605,6 @@ drop:
        return -1;
 }
 
-
 static void flush_stack(struct sock **stack, unsigned int count,
                        struct sk_buff *skb, unsigned int final)
 {
@@ -1619,7 +1614,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
        for (i = 0; i < count; i++) {
                sk = stack[i];
-               if (likely(skb1 == NULL))
+               if (likely(!skb1))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
                if (!skb1) {
@@ -1802,7 +1797,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                                                saddr, daddr, udptable, proto);
 
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk != NULL) {
+       if (sk) {
                int ret;
 
                if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
index 2dbfc1f1f7b3c96a2b0195fc9a3be1c1109d0bbe..b763c39ae1d734621a5f6872f9d41d442f476c92 100644 (file)
@@ -58,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out_nosk;
 
        err = -ENOENT;
-       if (sk == NULL)
+       if (!sk)
                goto out_nosk;
 
        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
index 4915d8284a86f9ec6e5536804b24398d205e2a2b..f9386160cbee0288e294ea2cd8ba3b5be65cdbf6 100644 (file)
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
 unlock:
        spin_unlock(&udp_offload_lock);
-       if (uo_priv != NULL)
+       if (uo_priv)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
 }
 EXPORT_SYMBOL(udp_del_offload);
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
                        break;
        }
 
-       if (uo_priv != NULL) {
+       if (uo_priv) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
                err = uo_priv->offload->callbacks.gro_complete(skb,
                                nhoff + sizeof(struct udphdr),
index c83b354850563ebc268a349bc25bc8b668435732..6bb98cc193c9a1b532b668b34e9b401357ad96e1 100644 (file)
@@ -75,7 +75,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 }
 EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
 
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                        __be32 src, __be32 dst, __u8 tos, __u8 ttl,
                        __be16 df, __be16 src_port, __be16 dst_port,
                        bool xnet, bool nocheck)
@@ -92,7 +92,7 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
 
        udp_set_csum(nocheck, skb, src, dst, skb->len);
 
-       return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP,
+       return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
                             tos, ttl, df, xnet);
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
index aac6197b7a7132f31af9a80d960d94d4a9f92290..60b032f58ccc9ffca8617de21c72288b919889b7 100644 (file)
@@ -22,9 +22,9 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
        return xfrm4_extract_header(skb);
 }
 
-static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
+static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb)
 {
-       if (skb_dst(skb) == NULL) {
+       if (!skb_dst(skb)) {
                const struct iphdr *iph = ip_hdr(skb);
 
                if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
@@ -52,7 +52,8 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
 
-       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
+               skb->dev, NULL,
                xfrm4_rcv_encap_finish);
        return 0;
 }
index dab73813cb9208dafaae1277e281c2255601a771..2878dbfffeb7e769a32079f1a6b80061136a7efc 100644 (file)
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(xfrm4_prepare_output);
 
-int xfrm4_output_finish(struct sk_buff *skb)
+int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
 {
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
@@ -77,26 +77,26 @@ int xfrm4_output_finish(struct sk_buff *skb)
        IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-       return xfrm_output(skb);
+       return xfrm_output(sk, skb);
 }
 
-static int __xfrm4_output(struct sk_buff *skb)
+static int __xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
        struct xfrm_state *x = skb_dst(skb)->xfrm;
 
 #ifdef CONFIG_NETFILTER
        if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
-               return dst_output(skb);
+               return dst_output_sk(sk, skb);
        }
 #endif
 
-       return x->outer_mode->afinfo->output_finish(skb);
+       return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
                            NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
index c224c856247bbfbed37394f2b4ed085d072989d1..bff69746e05f05d936ec8f7a62c34d3f87a55d10 100644 (file)
@@ -298,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net)
 {
        struct ctl_table *table;
 
-       if (net->ipv4.xfrm4_hdr == NULL)
+       if (!net->ipv4.xfrm4_hdr)
                return;
 
        table = net->ipv4.xfrm4_hdr->ctl_table_arg;
index 5c9e94cb1b2cec57574b47d5cba1599a9f359890..37b70e82bff8ee9b9964a0237df9d66f3b78bcc7 100644 (file)
@@ -4858,8 +4858,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
            (dev->addr_len &&
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
            nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
                goto nla_put_failure;
        protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
        if (!protoinfo)
index 61fb184b818d80d3d614bc5ee7011f47b36895ab..2367a16eae58a31e01aa0d1d676090b688102593 100644 (file)
@@ -315,7 +315,9 @@ out_fib6_rules_ops:
 
 static void __net_exit fib6_rules_net_exit(struct net *net)
 {
+       rtnl_lock();
        fib_rules_unregister(net->ipv6.fib6_rules_ops);
+       rtnl_unlock();
 }
 
 static struct pernet_operations fib6_rules_net_ops = {
index 0f4e73da14e49fba9ff122da8a3c6380f6937a85..b5e6cc1d4a7302f3288ba00ee9c75bb327410960 100644 (file)
@@ -760,7 +760,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
        skb_set_inner_protocol(skb, protocol);
 
-       ip6tunnel_xmit(skb, dev);
+       ip6tunnel_xmit(NULL, skb, dev);
        if (ndst)
                ip6_tnl_dst_store(tunnel, ndst);
        return 0;
@@ -1216,6 +1216,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
        .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
        .ndo_change_mtu         = ip6gre_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_get_iflink         = ip6_tnl_get_iflink,
 };
 
 static void ip6gre_dev_free(struct net_device *dev)
@@ -1238,7 +1239,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
        dev->flags |= IFF_NOARP;
-       dev->iflink = 0;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
 }
@@ -1270,8 +1270,6 @@ static int ip6gre_tunnel_init(struct net_device *dev)
                u64_stats_init(&ip6gre_tunnel_stats->syncp);
        }
 
-       dev->iflink = tunnel->parms.link;
-
        return 0;
 }
 
@@ -1480,8 +1478,6 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
-       dev->iflink = tunnel->parms.link;
-
        return 0;
 }
 
@@ -1493,6 +1489,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ip6gre_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
 static void ip6gre_tap_setup(struct net_device *dev)
@@ -1503,7 +1500,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->destructor = ip6gre_dev_free;
 
-       dev->iflink = 0;
        dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
index fb97f7f8d4ed11497e088d063fbe8d78e80d0ed0..f2e464eba5efdb7b2a8abe3c2cecedae473777e7 100644 (file)
@@ -46,8 +46,7 @@
 #include <net/xfrm.h>
 #include <net/inet_ecn.h>
 
-
-int ip6_rcv_finish(struct sk_buff *skb)
+int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
        if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct inet6_protocol *ipprot;
@@ -183,7 +182,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
 
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
+                      dev, NULL,
                       ip6_rcv_finish);
 err:
        IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -198,7 +198,7 @@ drop:
  */
 
 
-static int ip6_input_finish(struct sk_buff *skb)
+static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
        const struct inet6_protocol *ipprot;
@@ -277,7 +277,8 @@ discard:
 
 int ip6_input(struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, NULL, skb,
+                      skb->dev, NULL,
                       ip6_input_finish);
 }
 
index 84c58da10f5ca10dcf06a1376e8d16f3a8b087af..7fde1f265c90e90f16291e6c861b6e242111c25b 100644 (file)
@@ -56,7 +56,7 @@
 #include <net/checksum.h>
 #include <linux/mroute6.h>
 
-static int ip6_finish_output2(struct sk_buff *skb)
+static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
@@ -70,7 +70,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
-               if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
+               if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
                    ((mroute6_socket(dev_net(dev), skb) &&
                     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
                     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
@@ -82,7 +82,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
                         */
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
-                                       newskb, NULL, newskb->dev,
+                                       sk, newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);
 
                        if (ipv6_hdr(skb)->hop_limit == 0) {
@@ -122,14 +122,14 @@ static int ip6_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
-static int ip6_finish_output(struct sk_buff *skb)
+static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
 {
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
-               return ip6_fragment(skb, ip6_finish_output2);
+               return ip6_fragment(sk, skb, ip6_finish_output2);
        else
-               return ip6_finish_output2(skb);
+               return ip6_finish_output2(sk, skb);
 }
 
 int ip6_output(struct sock *sk, struct sk_buff *skb)
@@ -143,7 +143,8 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
-       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
+                           NULL, dev,
                            ip6_finish_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
@@ -223,8 +224,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
-               return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
-                              dst->dev, dst_output);
+               return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+                              NULL, dst->dev, dst_output_sk);
        }
 
        skb->dev = dst->dev;
@@ -316,10 +317,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
        return 0;
 }
 
-static inline int ip6_forward_finish(struct sk_buff *skb)
+static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
 {
        skb_sender_cpu_clear(skb);
-       return dst_output(skb);
+       return dst_output_sk(sk, skb);
 }
 
 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -511,7 +512,8 @@ int ip6_forward(struct sk_buff *skb)
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
+                      skb->dev, dst->dev,
                       ip6_forward_finish);
 
 error:
@@ -538,11 +540,13 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        skb_copy_secmark(to, from);
 }
 
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+int ip6_fragment(struct sock *sk, struct sk_buff *skb,
+                int (*output)(struct sock *, struct sk_buff *))
 {
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-       struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+       struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+                               inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
@@ -666,7 +670,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                                ip6_copy_metadata(frag, skb);
                        }
 
-                       err = output(skb);
+                       err = output(sk, skb);
                        if (!err)
                                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                              IPSTATS_MIB_FRAGCREATES);
@@ -799,7 +803,7 @@ slow_path:
                /*
                 *      Put this fragment into the sending queue.
                 */
-               err = output(frag);
+               err = output(sk, frag);
                if (err)
                        goto fail;
 
index 9bd85f0dff69b2b2a8bb9277bd9eaffac04b042c..5cafd92c231270703af5bf948d131d99c5e9c193 100644 (file)
@@ -1100,7 +1100,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
        ipv6h->daddr = fl6->daddr;
-       ip6tunnel_xmit(skb, dev);
+       ip6tunnel_xmit(NULL, skb, dev);
        if (ndst)
                ip6_tnl_dst_store(t, ndst);
        return 0;
@@ -1264,8 +1264,6 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       dev->iflink = p->link;
-
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
@@ -1517,6 +1515,13 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+int ip6_tnl_get_iflink(const struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+
+       return t->parms.link;
+}
+EXPORT_SYMBOL(ip6_tnl_get_iflink);
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
        .ndo_init       = ip6_tnl_dev_init,
@@ -1525,6 +1530,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
        .ndo_do_ioctl   = ip6_tnl_ioctl,
        .ndo_change_mtu = ip6_tnl_change_mtu,
        .ndo_get_stats  = ip6_get_stats,
+       .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
 
index 32d9b268e7d85faa6b33a3fa1a3f3fff9a36cad8..bba8903e871fabd73c217efb6250eb743f7d66d8 100644 (file)
@@ -62,7 +62,8 @@ error:
 }
 EXPORT_SYMBOL_GPL(udp_sock_create6);
 
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+                        struct sk_buff *skb,
                         struct net_device *dev, struct in6_addr *saddr,
                         struct in6_addr *daddr,
                         __u8 prio, __u8 ttl, __be16 src_port,
@@ -97,7 +98,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
        ip6h->daddr       = *daddr;
        ip6h->saddr       = *saddr;
 
-       ip6tunnel_xmit(skb, dev);
+       ip6tunnel_xmit(sk, skb, dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
index 53d90ed68905f1ab0935ba0735bea8487a03e921..b53148444e157f821c86b467b166fc9ce7bd5ccb 100644 (file)
@@ -601,8 +601,6 @@ static void vti6_link_config(struct ip6_tnl *t)
                dev->flags |= IFF_POINTOPOINT;
        else
                dev->flags &= ~IFF_POINTOPOINT;
-
-       dev->iflink = p->link;
 }
 
 /**
@@ -808,6 +806,7 @@ static const struct net_device_ops vti6_netdev_ops = {
        .ndo_do_ioctl   = vti6_ioctl,
        .ndo_change_mtu = vti6_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
 /**
index caf6b99374e678709e6ac34b82f86d286c7342ed..74ceb73c1c9a042b0f8f9f65c264e8426d65f7f3 100644 (file)
@@ -250,7 +250,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
        return 0;
 
 err2:
-       kfree(mrt);
+       ip6mr_free_table(mrt);
 err1:
        fib_rules_unregister(ops);
        return err;
@@ -265,8 +265,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
                list_del(&mrt->list);
                ip6mr_free_table(mrt);
        }
-       rtnl_unlock();
        fib_rules_unregister(net->ipv6.mr6_rules_ops);
+       rtnl_unlock();
 }
 #else
 #define ip6mr_for_each_table(mrt, net) \
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 
 static void ip6mr_free_table(struct mr6_table *mrt)
 {
-       del_timer(&mrt->ipmr_expire_timer);
+       del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt);
        kfree(mrt);
 }
@@ -718,8 +718,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
+static int reg_vif_get_iflink(const struct net_device *dev)
+{
+       return 0;
+}
+
 static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
+       .ndo_get_iflink = reg_vif_get_iflink,
 };
 
 static void reg_vif_setup(struct net_device *dev)
@@ -752,7 +758,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
                free_netdev(dev);
                return NULL;
        }
-       dev->iflink = 0;
 
        if (dev_open(dev))
                goto failure;
@@ -992,7 +997,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & MIFF_REGISTER)
-               v->link = dev->iflink;
+               v->link = dev_get_iflink(dev);
 
        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
@@ -1981,13 +1986,13 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 }
 #endif
 
-static inline int ip6mr_forward2_finish(struct sk_buff *skb)
+static inline int ip6mr_forward2_finish(struct sock *sk, struct sk_buff *skb)
 {
        IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTOCTETS, skb->len);
-       return dst_output(skb);
+       return dst_output_sk(sk, skb);
 }
 
 /*
@@ -2059,7 +2064,8 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
 
        IP6CB(skb)->flags |= IP6SKB_FORWARDED;
 
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
+                      skb->dev, dev,
                       ip6mr_forward2_finish);
 
 out_free:
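
Three small correctness fixes sit in the ip6mr hunks above: the err2 unwind path now tears the table down with ip6mr_free_table() instead of a bare kfree(), the fib rules are unregistered before RTNL is released, and the expire timer is cancelled with del_timer_sync() before the table memory is freed. A minimal userspace sketch of the first point follows; the names and the boolean timer stand-in are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct mr_table {
        bool timer_armed;
    };

    static struct mr_table *new_table(void)
    {
        struct mr_table *mrt = calloc(1, sizeof(*mrt));

        if (mrt)
            mrt->timer_armed = true;   /* expire timer set up at creation */
        return mrt;
    }

    /* Matching destructor: cancel the timer, then free.  A bare free() would
     * leave the "armed" timer behind (in the kernel: a pending timer that
     * still references freed memory). */
    static void free_table(struct mr_table *mrt)
    {
        mrt->timer_armed = false;      /* analogue of del_timer_sync() */
        free(mrt);
    }

    static int rules_init(bool later_step_fails)
    {
        struct mr_table *mrt = new_table();

        if (!mrt)
            return -1;
        if (later_step_fails)
            goto err;
        free_table(mrt);               /* normal teardown for the demo */
        return 0;
    err:
        free_table(mrt);               /* the fix: not a bare free(mrt) */
        return -1;
    }

    int main(void)
    {
        printf("ok=%d, failing=%d\n", rules_init(false), rules_init(true));
        return 0;
    }
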
index fac1f27e428e26257a949b3d3d062ddecdda3e87..083b2927fc67aaa3939fff42a1c6ee9f1ca70afe 100644 (file)
@@ -1644,8 +1644,9 @@ static void mld_sendpack(struct sk_buff *skb)
 
        payload_len = skb->len;
 
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
-                     dst_output);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                     net->ipv6.igmp_sk, skb, NULL, skb->dev,
+                     dst_output_sk);
 out:
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
@@ -2007,8 +2008,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        }
 
        skb_dst_set(skb, dst);
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
-                     dst_output);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+                     NULL, skb->dev, dst_output_sk);
 out:
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, type);
index c283827d60e2c67bc6f963eb086341032551d402..96f153c0846b7abcd9b1af995c49067efbe7064d 100644 (file)
@@ -463,8 +463,9 @@ static void ndisc_send_skb(struct sk_buff *skb,
        idev = __in6_dev_get(dst->dev);
        IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
-                     dst_output);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+                     NULL, dst->dev,
+                     dst_output_sk);
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, type);
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
@@ -1225,7 +1226,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        if (rt)
                rt6_set_expires(rt, jiffies + (HZ * lifetime));
        if (ra_msg->icmph.icmp6_hop_limit) {
-               in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+               /* Only set hop_limit on the interface if it is higher than
+                * the current hop_limit.
+                */
+               if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+                       in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+               } else {
+                       ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+               }
                if (rt)
                        dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
                                       ra_msg->icmph.icmp6_hop_limit);
index 398377a9d0183d297edff70c53dc5cee0a4ab8f9..d958718b50318911d27ece2b7e9f026ca68e8c97 100644 (file)
@@ -84,7 +84,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
 {
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);
 
                rt_info->daddr = iph->daddr;
@@ -98,7 +98,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 {
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);
                if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
                    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
index 83f59dc3cccc141a7a64261f27795630379c09a1..1a732a1d3c8e13c58508cef9381d2d32e5a34448 100644 (file)
@@ -317,8 +317,7 @@ ip6t_next_entry(const struct ip6t_entry *entry)
 unsigned int
 ip6t_do_table(struct sk_buff *skb,
              unsigned int hook,
-             const struct net_device *in,
-             const struct net_device *out,
+             const struct nf_hook_state *state,
              struct xt_table *table)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -333,8 +332,8 @@ ip6t_do_table(struct sk_buff *skb,
        unsigned int addend;
 
        /* Initialization */
-       indev = in ? in->name : nulldevname;
-       outdev = out ? out->name : nulldevname;
+       indev = state->in ? state->in->name : nulldevname;
+       outdev = state->out ? state->out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
@@ -342,8 +341,8 @@ ip6t_do_table(struct sk_buff *skb,
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
        acpar.hotdrop = false;
-       acpar.in      = in;
-       acpar.out     = out;
+       acpar.in      = state->in;
+       acpar.out     = state->out;
        acpar.family  = NFPROTO_IPV6;
        acpar.hooknum = hook;
 
@@ -393,7 +392,7 @@ ip6t_do_table(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
-                       trace_packet(skb, hook, in, out,
+                       trace_packet(skb, hook, state->in, state->out,
                                     table->name, private, e);
 #endif
                /* Standard target? */
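
The recurring change through the remaining netfilter hunks is that hook functions stop taking the in/out devices and okfn as separate arguments and instead receive a single const struct nf_hook_state * that carries them (the fields visible in this merge are hook, thresh, pf, in, out, sk and okfn). Below is a compilable userspace sketch of that consolidation; the struct layout, the sample values and sample_hook() are illustrative stand-ins, not the kernel definitions.

    #include <limits.h>
    #include <stdio.h>

    struct net_device { const char *name; };
    struct sk_buff    { int len; };
    struct sock;

    /* Stand-in for struct nf_hook_state: the per-invocation context that the
     * old hook prototype spread over several arguments. */
    struct nf_hook_state {
        unsigned int hook;
        int thresh;
        unsigned char pf;
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
        int (*okfn)(struct sock *sk, struct sk_buff *skb);
    };

    struct nf_hook_ops { int priority; };

    /* Old prototype: ops, skb, in, out, okfn.  New prototype: ops, skb, state. */
    typedef unsigned int (*hookfn_new)(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *state);

    static unsigned int sample_hook(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
                                    const struct nf_hook_state *state)
    {
        printf("hook %u (prio %d): %d bytes, in=%s out=%s\n",
               state->hook, ops->priority, skb->len,
               state->in  ? state->in->name  : "(none)",
               state->out ? state->out->name : "(none)");
        return 1;   /* "accept" */
    }

    int main(void)
    {
        struct net_device eth0 = { "eth0" };
        struct sk_buff skb = { 128 };
        struct nf_hook_ops ops = { 0 };
        struct nf_hook_state state = {
            .hook = 2, .thresh = INT_MIN, .pf = 10 /* IPv6 */,
            .in = &eth0, .out = NULL, .sk = NULL, .okfn = NULL,
        };
        hookfn_new fn = sample_hook;

        return fn(&ops, &skb, &state) == 1 ? 0 : 1;
    }

Hooks that still need the individual values simply read them back out of the state, as the converted ip6table_* hooks above do.
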
index a0d17270117c37793be3cb61c4d767cd57f70611..6edb7b106de769728357174d0657c644f83e41e8 100644 (file)
@@ -315,11 +315,9 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 
 static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
+                                      const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+       struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
index ca7f6c1280861b2977dce643fdea349eb3ec5078..5c33d8abc0774e52a99c20273f3349b0b5374fc7 100644 (file)
@@ -33,13 +33,11 @@ static const struct xt_table packet_filter = {
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out,
-                            net->ipv6.ip6table_filter);
+       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index 307bbb782d147011d689f04c92e0ba5ac7c13074..b551f5b79fe2b7fa62278ae1f7d9327e82795253 100644 (file)
@@ -32,7 +32,7 @@ static const struct xt_table packet_mangler = {
 };
 
 static unsigned int
-ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
+ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 {
        unsigned int ret;
        struct in6_addr saddr, daddr;
@@ -57,8 +57,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
        /* flowlabel and prio (includes version, which shouldn't change either */
        flowlabel = *((u_int32_t *)ipv6_hdr(skb));
 
-       ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
-                           dev_net(out)->ipv6.ip6table_mangle);
+       ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
+                           dev_net(state->out)->ipv6.ip6table_mangle);
 
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -77,17 +77,16 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        if (ops->hooknum == NF_INET_LOCAL_OUT)
-               return ip6t_mangle_out(skb, out);
+               return ip6t_mangle_out(skb, state);
        if (ops->hooknum == NF_INET_POST_ROUTING)
-               return ip6t_do_table(skb, ops->hooknum, in, out,
-                                    dev_net(out)->ipv6.ip6table_mangle);
+               return ip6t_do_table(skb, ops->hooknum, state,
+                                    dev_net(state->out)->ipv6.ip6table_mangle);
        /* INPUT/FORWARD */
-       return ip6t_do_table(skb, ops->hooknum, in, out,
-                            dev_net(in)->ipv6.ip6table_mangle);
+       return ip6t_do_table(skb, ops->hooknum, state,
+                            dev_net(state->in)->ipv6.ip6table_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
index b0634ac996b706a9108b2f1369e4c2be9c1188c4..c3a7f7af0ed4d183d00a5f50307f44fa44399460 100644 (file)
@@ -32,49 +32,40 @@ static const struct xt_table nf_nat_ipv6_table = {
 
 static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
+                                         const struct nf_hook_state *state,
                                          struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat);
+       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_nat);
 }
 
 static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_fn(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_in(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_out(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_local_fn(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
index 5274740acecc93b4550dabdd7f48fac3c04f67ac..0b33caad2b69254e29af5ff38484e37a0dc6c711 100644 (file)
@@ -20,13 +20,11 @@ static const struct xt_table packet_raw = {
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                 const struct net_device *in, const struct net_device *out,
-                 int (*okfn)(struct sk_buff *))
+                 const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out,
-                            net->ipv6.ip6table_raw);
+       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index ab3b0219ecfa436c07eb5cb86af36bd04efbdfb7..fcef83c25f7b3281a92a2d5be27512e057dddfff 100644 (file)
@@ -37,13 +37,11 @@ static const struct xt_table security_table = {
 
 static unsigned int
 ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                      const struct net_device *in,
-                      const struct net_device *out,
-                      int (*okfn)(struct sk_buff *))
+                      const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out,
+       return ip6t_do_table(skb, ops->hooknum, state,
                             net->ipv6.ip6table_security);
 }
 
index fba91c6fc7ca38f1a282da09b7323519ddf3b1aa..4ba0c34c627b0e88d3a06fda6532c83a3936315e 100644 (file)
@@ -97,9 +97,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
 
 static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
-                               const struct net_device *in,
-                               const struct net_device *out,
-                               int (*okfn)(struct sk_buff *))
+                               const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        const struct nf_conn_help *help;
@@ -135,9 +133,7 @@ static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
 
 static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
-                                const struct net_device *in,
-                                const struct net_device *out,
-                                int (*okfn)(struct sk_buff *))
+                                const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -171,25 +167,21 @@ out:
 
 static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(dev_net(in), PF_INET6, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->in), PF_INET6, ops->hooknum, skb);
 }
 
 static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct ipv6hdr)) {
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return nf_conntrack_in(dev_net(out), PF_INET6, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->out), PF_INET6, ops->hooknum, skb);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
index e70382e4dfb5d05d91d9dad132d04baabd2b48d5..a45db0b4785c1e89f523ce28cb8e4231fbbc85b9 100644 (file)
@@ -54,9 +54,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 
 static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
-                               const struct net_device *in,
-                               const struct net_device *out,
-                               int (*okfn)(struct sk_buff *))
+                               const struct nf_hook_state *state)
 {
        struct sk_buff *reasm;
 
@@ -77,9 +75,9 @@ static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
 
        nf_ct_frag6_consume_orig(reasm);
 
-       NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, reasm,
-                      (struct net_device *) in, (struct net_device *) out,
-                      okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+       NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->sk, reasm,
+                      state->in, state->out,
+                      state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
 
        return NF_STOLEN;
 }
index c5812e1c1ffbfbd6029ba1dce305ca2d6f691f98..e76900e0aa925a26c226f733f9a44e396ea7cc7f 100644 (file)
@@ -263,11 +263,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
 
 unsigned int
 nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
+                                       const struct nf_hook_state *state,
                                        struct nf_conn *ct))
 {
        struct nf_conn *ct;
@@ -318,7 +317,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = do_chain(ops, skb, in, out, ct);
+                       ret = do_chain(ops, skb, state, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
 
@@ -332,7 +331,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
                                goto oif_changed;
                }
                break;
@@ -341,7 +340,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
                        goto oif_changed;
        }
 
@@ -355,17 +354,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
 
 unsigned int
 nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
+                                       const struct nf_hook_state *state,
                                        struct nf_conn *ct))
 {
        unsigned int ret;
        struct in6_addr daddr = ipv6_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
                skb_dst_drop(skb);
@@ -376,11 +374,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
 
 unsigned int
 nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-               const struct net_device *in, const struct net_device *out,
+               const struct nf_hook_state *state,
                unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
+                                        const struct nf_hook_state *state,
                                         struct nf_conn *ct))
 {
 #ifdef CONFIG_XFRM
@@ -394,7 +391,7 @@ nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -418,11 +415,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
 
 unsigned int
 nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
+                    const struct nf_hook_state *state,
                     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                              struct sk_buff *skb,
-                                             const struct net_device *in,
-                                             const struct net_device *out,
+                                             const struct nf_hook_state *state,
                                              struct nf_conn *ct))
 {
        const struct nf_conn *ct;
@@ -434,7 +430,7 @@ nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index 3afdce03d94e7c2dd27d19fabff9250c0f5aaddc..94b4c6dfb400c90b6c368acb7ecb83649309dce0 100644 (file)
@@ -13,6 +13,7 @@
 #include <net/ip6_checksum.h>
 #include <net/netfilter/ipv6/nf_reject.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_bridge.h>
 #include <net/netfilter/ipv6/nf_reject.h>
 
 const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
@@ -195,7 +196,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
         */
        if (oldskb->nf_bridge) {
                struct ethhdr *oeth = eth_hdr(oldskb);
-               nskb->dev = oldskb->nf_bridge->physindev;
+
+               nskb->dev = nf_bridge_get_physindev(oldskb);
                nskb->protocol = htons(ETH_P_IPV6);
                ip6h->payload_len = htons(sizeof(struct tcphdr));
                if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
index 0d812b31277d9eb04133dbc880e0e151e0f2cf1f..c8148ba76d1a765e1ee2ba190961045fad033c6b 100644 (file)
 
 static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
        /* malformed packet, drop it */
-       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
                return NF_DROP;
 
        return nft_do_chain(&pkt, ops);
@@ -33,9 +31,7 @@ static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
 
 static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
        if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
                if (net_ratelimit())
@@ -44,7 +40,7 @@ static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
                return NF_ACCEPT;
        }
 
-       return nft_do_chain_ipv6(ops, skb, in, out, okfn);
+       return nft_do_chain_ipv6(ops, skb, state);
 }
 
 struct nft_af_info nft_af_ipv6 __read_mostly = {
index 1c4b75dd425b8e7fe421df37e215534a6eb19584..951bb458b7bd53968f76b6e8431f12214a05b88a 100644 (file)
 
 static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
+                                    const struct nf_hook_state *state,
                                     struct nf_conn *ct)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv6(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
 
 static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_in(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_out(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_local_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static const struct nf_chain_type nft_chain_nat_ipv6 = {
index 42031299585e1be0452b35a264ee010bb9d7f7fb..0dafdaac5e175062b8c81665834390a8aabe5171 100644 (file)
@@ -24,9 +24,7 @@
 
 static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        unsigned int ret;
        struct nft_pktinfo pkt;
@@ -35,7 +33,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
        u32 mark, flowlabel;
 
        /* malformed packet, drop it */
-       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
                return NF_DROP;
 
        /* save source/dest address, mark, hoplimit, flowlabel, priority */
index f732859241444118d6cc357843482f70b432c786..71c7be5ee43a0e067e2d653cb37cee3a92e487bc 100644 (file)
@@ -34,6 +34,8 @@ static void nft_reject_ipv6_eval(const struct nft_expr *expr,
        case NFT_REJECT_TCP_RST:
                nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
                break;
+       default:
+               break;
        }
 
        data[NFT_REG_VERDICT].verdict = NF_DROP;
index 4016a6ef9d61479e9c6c418db608588b385314b6..85892af5736491fef3978bb8b195045e8e6d2e2c 100644 (file)
@@ -136,7 +136,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
 EXPORT_SYMBOL(ip6_dst_hoplimit);
 #endif
 
-int __ip6_local_out(struct sk_buff *skb)
+static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
        int len;
 
@@ -146,19 +146,30 @@ int __ip6_local_out(struct sk_buff *skb)
        ipv6_hdr(skb)->payload_len = htons(len);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
-       return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
-                      skb_dst(skb)->dev, dst_output);
+       return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+                      NULL, skb_dst(skb)->dev, dst_output_sk);
+}
+
+int __ip6_local_out(struct sk_buff *skb)
+{
+       return __ip6_local_out_sk(skb->sk, skb);
 }
 EXPORT_SYMBOL_GPL(__ip6_local_out);
 
-int ip6_local_out(struct sk_buff *skb)
+int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
        int err;
 
-       err = __ip6_local_out(skb);
+       err = __ip6_local_out_sk(sk, skb);
        if (likely(err == 1))
-               err = dst_output(skb);
+               err = dst_output_sk(sk, skb);
 
        return err;
 }
+EXPORT_SYMBOL_GPL(ip6_local_out_sk);
+
+int ip6_local_out(struct sk_buff *skb)
+{
+       return ip6_local_out_sk(skb->sk, skb);
+}
 EXPORT_SYMBOL_GPL(ip6_local_out);
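
The output_core.c hunk keeps the old skb-only entry points as thin wrappers around new *_sk variants that take the socket explicitly. A tiny sketch of that compatibility-wrapper pattern, using placeholder types:

    #include <stdio.h>

    struct sock    { int id; };
    struct sk_buff { int len; struct sock *sk; };

    /* New primary entry point: the socket is an explicit argument. */
    static int local_out_sk(struct sock *sk, struct sk_buff *skb)
    {
        printf("sending %d bytes on socket %d\n",
               skb->len, sk ? sk->id : -1);
        return 0;
    }

    /* Old entry point kept as a thin wrapper so callers that only have an
     * skb keep working; it recovers the socket from skb->sk, exactly the
     * lookup the new callers avoid. */
    static int local_out(struct sk_buff *skb)
    {
        return local_out_sk(skb->sk, skb);
    }

    int main(void)
    {
        struct sock sk = { 7 };
        struct sk_buff skb = { 1500, &sk };

        return local_out(&skb);
    }
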
index 79ccdb4c1b336bca0f9ed72dfdd4ea02fbe6a01a..8072bd4139b7576a797bdebd6c3c5b75c8412582 100644 (file)
@@ -652,8 +652,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                goto error_fault;
 
        IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
-                     rt->dst.dev, dst_output);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+                     NULL, rt->dst.dev, dst_output_sk);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
index e6b9f51b15e8f31abcf7dc62180f3b7afc8427a8..ac35a28599be557cac1114cad35e3c47c41436c1 100644 (file)
@@ -983,7 +983,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 
        skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
-       err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr,
+       err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr,
                            protocol, tos, ttl, df,
                            !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
@@ -1076,7 +1076,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
                if (dev->mtu < IPV6_MIN_MTU)
                        dev->mtu = IPV6_MIN_MTU;
        }
-       dev->iflink = tunnel->parms.link;
 }
 
 static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
@@ -1336,6 +1335,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
        .ndo_do_ioctl   = ipip6_tunnel_ioctl,
        .ndo_change_mtu = ipip6_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip_tunnel_get_iflink,
 };
 
 static void ipip6_dev_free(struct net_device *dev)
@@ -1366,7 +1366,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        dev->mtu                = ETH_DATA_LEN - t_hlen;
        dev->flags              = IFF_NOARP;
        netif_keep_dst(dev);
-       dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
        dev->features           |= SIT_FEATURES;
index 7cdad84014342ac8f19f4aa196d24479b04075eb..f73a97f6e68ec8286972fadcf9328e29af123242 100644 (file)
@@ -1348,6 +1348,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
        TCP_SKB_CB(skb)->sacked = 0;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
        const struct tcphdr *th;
@@ -1480,6 +1489,7 @@ do_time_wait:
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        sk = sk2;
+                       tcp_v6_restore_cb(skb);
                        goto process;
                }
                /* Fall through to ACK */
@@ -1488,6 +1498,7 @@ do_time_wait:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
+               tcp_v6_restore_cb(skb);
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:
                ;
@@ -1522,7 +1533,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk_fullsock(sk)) {
-                       struct dst_entry *dst = sk->sk_rx_dst;
+                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
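
tcp_v6_restore_cb() above copies the saved IPv6 control block back to the front of skb->cb with memmove() before the packet is re-processed by code that expects the IP6CB() layout there (the new call sites are the timewait socket re-lookup and the TCP_TW_RST path). The sketch below only illustrates why an overlap-safe copy is the right tool; the struct shapes and offsets are invented and are not the kernel's tcp_skb_cb layout.

    #include <stdio.h>
    #include <string.h>

    /* Invented layouts: an "IP" control block and a "TCP" control block that
     * keeps a saved copy of it at a non-zero offset. */
    struct ip_parm { int iif; int nhoff; };
    struct tcp_cb  { int seq; struct ip_parm header; };

    /* Analogue of skb->cb: one scratch area reinterpreted by each layer. */
    union cb_area {
        struct ip_parm ip;
        struct tcp_cb  tcp;
    };

    int main(void)
    {
        union cb_area cb;
        struct tcp_cb tcp;

        /* The IP layer fills its view first ... */
        cb.ip.iif = 2;
        cb.ip.nhoff = 40;

        /* ... then TCP overlays its own view, tucking the IP part away
         * inside it (analogue of TCP_SKB_CB(skb)->header.h6). */
        tcp.seq = 1234;
        tcp.header = cb.ip;
        cb.tcp = tcp;

        /* Restore step: move the saved block back to offset 0 before
         * re-running code that expects the IP layout there.  The source
         * and destination overlap, hence memmove() rather than memcpy(). */
        memmove(&cb.ip, &cb.tcp.header, sizeof(cb.ip));

        printf("restored iif=%d nhoff=%d\n", cb.ip.iif, cb.ip.nhoff);
        return 0;
    }
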
index 120aff9aa010f3e3b4d5b04b290324f16eecc5d9..3477c919fcc8eb534c3a438ab6d607a6215897f8 100644 (file)
@@ -120,7 +120,6 @@ static u32 udp6_portaddr_hash(const struct net *net,
        return hash ^ port;
 }
 
-
 int udp_v6_get_port(struct sock *sk, unsigned short snum)
 {
        unsigned int hash2_nulladdr =
@@ -385,7 +384,6 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be
 }
 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 
-
 /*
  *     This should be easy, if there is something there we
  *     return it, otherwise we block.
@@ -1555,7 +1553,6 @@ static struct inet_protosw udpv6_protosw = {
        .flags =     INET_PROTOSW_PERMANENT,
 };
 
-
 int __init udpv6_init(void)
 {
        int ret;
index f48fbe4d16f5f433c40cba8077663db77d2984d4..74bd17882a2fe5126012fae7625254a56f14e20d 100644 (file)
@@ -42,7 +42,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
        ipv6_hdr(skb)->payload_len = htons(skb->len);
        __skb_push(skb, skb->data - skb_network_header(skb));
 
-       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
+               skb->dev, NULL,
                ip6_rcv_finish);
        return -1;
 }
index 010f8bd2d577f9767d7d44182b246e62f4a118f6..09c76a7b474dbcb12cae8aeba6fcba375d0d329a 100644 (file)
@@ -120,7 +120,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(xfrm6_prepare_output);
 
-int xfrm6_output_finish(struct sk_buff *skb)
+int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
 {
        memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 
@@ -128,10 +128,10 @@ int xfrm6_output_finish(struct sk_buff *skb)
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-       return xfrm_output(skb);
+       return xfrm_output(sk, skb);
 }
 
-static int __xfrm6_output(struct sk_buff *skb)
+static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
@@ -140,7 +140,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 #ifdef CONFIG_NETFILTER
        if (!x) {
                IP6CB(skb)->flags |= IP6SKB_REROUTED;
-               return dst_output(skb);
+               return dst_output_sk(sk, skb);
        }
 #endif
 
@@ -160,14 +160,15 @@ static int __xfrm6_output(struct sk_buff *skb)
        if (x->props.mode == XFRM_MODE_TUNNEL &&
            ((skb->len > mtu && !skb_is_gso(skb)) ||
                dst_allfrag(skb_dst(skb)))) {
-                       return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
+               return ip6_fragment(sk, skb,
+                                   x->outer_mode->afinfo->output_finish);
        }
-       return x->outer_mode->afinfo->output_finish(skb);
+       return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
                            NULL, skb_dst(skb)->dev, __xfrm6_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
index 94b4c898a116ce28db7f0451a4d4f09937c8cef1..6daa52a18d40ca2a40f702acefccd01e6b145f36 100644 (file)
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                        noblock, &err);
        else
                skb = sock_alloc_send_skb(sk, len, noblock, &err);
-       if (!skb) {
-               err = -ENOMEM;
+       if (!skb)
                goto out;
-       }
        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
index 895348e44c7d22c9e6d4828195e7099a74154531..a29a504492af6f2c38607f2c15e123a297d565cd 100644 (file)
@@ -1871,6 +1871,7 @@ static int __init l2tp_init(void)
        l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
        if (!l2tp_wq) {
                pr_err("alloc_workqueue failed\n");
+               unregister_pernet_device(&l2tp_net_ops);
                rc = -ENOMEM;
                goto out;
        }
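
The one-line l2tp fix above adds a missing unwind step: when alloc_workqueue() fails, the pernet ops registered a few lines earlier must be unregistered again before bailing out. A trivial sketch of that "undo earlier successful steps on a later failure" shape, with made-up helper names:

    #include <stdio.h>
    #include <stdbool.h>

    static bool pernet_registered;

    static int  register_pernet(void)   { pernet_registered = true;  return 0; }
    static void unregister_pernet(void) { pernet_registered = false; }
    static bool alloc_workqueue_ok(bool fail) { return !fail; }

    static int init_like_l2tp(bool wq_alloc_fails)
    {
        int rc = register_pernet();

        if (rc)
            return rc;

        if (!alloc_workqueue_ok(wq_alloc_fails)) {
            unregister_pernet();   /* the step the patch adds */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int rc = init_like_l2tp(true);

        printf("rc=%d, pernet still registered: %d\n",
               rc, pernet_registered);
        return 0;
    }
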
index 2c090c50739139f8cb95c073962a508265384923..5c564a68fb5088e21ccec11ffdde963bf8bb6cdf 100644 (file)
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
                container_of(h, struct tid_ampdu_rx, rcu_head);
        int i;
 
-       del_timer_sync(&tid_rx->reorder_timer);
-
        for (i = 0; i < tid_rx->buf_size; i++)
                __skb_queue_purge(&tid_rx->reorder_buf[i]);
        kfree(tid_rx->reorder_buf);
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 
        del_timer_sync(&tid_rx->session_timer);
 
+       /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
+       spin_lock_bh(&tid_rx->reorder_lock);
+       tid_rx->removed = true;
+       spin_unlock_bh(&tid_rx->reorder_lock);
+       del_timer_sync(&tid_rx->reorder_timer);
+
        call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
 
index bc59c8a20a39da20ea5039668a425c2bcc4fa7fb..260eed45b6d2ff105052643169465c04d333c182 100644 (file)
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
  set_release_timer:
 
-               mod_timer(&tid_agg_rx->reorder_timer,
-                         tid_agg_rx->reorder_time[j] + 1 +
-                         HT_RX_REORDER_BUF_TIMEOUT);
+               if (!tid_agg_rx->removed)
+                       mod_timer(&tid_agg_rx->reorder_timer,
+                                 tid_agg_rx->reorder_time[j] + 1 +
+                                 HT_RX_REORDER_BUF_TIMEOUT);
        } else {
                del_timer(&tid_agg_rx->reorder_timer);
        }
index 691d8a1f94a5586125597cff6726496a6ee01ac2..5c164fb3f6c5bd2d68b5daa3993ff369a2a8792f 100644 (file)
@@ -176,6 +176,7 @@ struct tid_ampdu_tx {
  * @reorder_lock: serializes access to reorder buffer, see below.
  * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
  *     and ssn.
+ * @removed: this session is removed (but might have been found due to RCU)
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -200,6 +201,7 @@ struct tid_ampdu_rx {
        u16 timeout;
        u8 dialog_token;
        bool auto_seq;
+       bool removed;
 };
 
 /**
index 89f73a9e98741702f2ea324e49b7b2f75bfb69fd..a87d8b8ec730421403930c69061a2c7167db0a6a 100644 (file)
@@ -70,7 +70,7 @@ obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 
 # nf_tables
 nf_tables-objs += nf_tables_core.o nf_tables_api.o
-nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o nft_dynset.o
 nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
 
 obj-$(CONFIG_NF_TABLES)                += nf_tables.o
index fea9ef566427f9295adac5c05ef020463af046cd..e6163017c42db2a1d553bc7b8ac812e5c401fcba 100644 (file)
@@ -120,12 +120,8 @@ EXPORT_SYMBOL(nf_unregister_hooks);
 
 unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff *skb,
-                       unsigned int hook,
-                       const struct net_device *indev,
-                       const struct net_device *outdev,
-                       struct nf_hook_ops **elemp,
-                       int (*okfn)(struct sk_buff *),
-                       int hook_thresh)
+                       struct nf_hook_state *state,
+                       struct nf_hook_ops **elemp)
 {
        unsigned int verdict;
 
@@ -134,19 +130,19 @@ unsigned int nf_iterate(struct list_head *head,
         * function because of risk of continuing from deleted element.
         */
        list_for_each_entry_continue_rcu((*elemp), head, list) {
-               if (hook_thresh > (*elemp)->priority)
+               if (state->thresh > (*elemp)->priority)
                        continue;
 
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
 repeat:
-               verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
+               verdict = (*elemp)->hook(*elemp, skb, state);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
-                                       (*elemp)->hook, hook);
+                                       (*elemp)->hook, state->hook);
                                continue;
                        }
 #endif
@@ -161,11 +157,7 @@ repeat:
 
 /* Returns 1 if okfn() needs to be executed by the caller,
  * -EPERM for NF_DROP, 0 otherwise. */
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
-                struct net_device *indev,
-                struct net_device *outdev,
-                int (*okfn)(struct sk_buff *),
-                int hook_thresh)
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
 {
        struct nf_hook_ops *elem;
        unsigned int verdict;
@@ -174,10 +166,11 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();
 
-       elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
+       elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
+                             struct nf_hook_ops, list);
 next_hook:
-       verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
-                            outdev, &elem, okfn, hook_thresh);
+       verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
+                            &elem);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
@@ -186,8 +179,8 @@ next_hook:
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-               int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-                                               verdict >> NF_VERDICT_QBITS);
+               int err = nf_queue(skb, elem, state,
+                                  verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
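
On the caller side, nf_hook_slow() and nf_iterate() now pull pf, hook and the priority threshold out of the same state object the hooks receive, rather than taking them as five extra parameters. The sketch below mirrors only the loop logic visible above (skip entries whose priority is below state->thresh, return the first verdict that is not accept); the types, values and the two-entry hook table are invented for the example.

    #include <limits.h>
    #include <stdio.h>

    struct sk_buff { int len; };

    struct nf_hook_state { int thresh; unsigned int hook; };

    struct nf_hook_ops {
        int priority;
        unsigned int (*hook)(struct sk_buff *skb,
                             const struct nf_hook_state *state);
    };

    enum { NF_DROP = 0, NF_ACCEPT = 1 };

    static unsigned int accept_all(struct sk_buff *skb,
                                   const struct nf_hook_state *state)
    {
        (void)skb; (void)state;
        return NF_ACCEPT;
    }

    static unsigned int drop_big(struct sk_buff *skb,
                                 const struct nf_hook_state *state)
    {
        (void)state;
        return skb->len > 1000 ? NF_DROP : NF_ACCEPT;
    }

    /* Analogue of nf_iterate(): walk the registered hooks in order, skipping
     * those whose priority is below the threshold carried in the state, and
     * return the first verdict that is not "accept". */
    static unsigned int iterate(const struct nf_hook_ops *ops, int n,
                                struct sk_buff *skb,
                                const struct nf_hook_state *state)
    {
        int i;

        for (i = 0; i < n; i++) {
            unsigned int verdict;

            if (state->thresh > ops[i].priority)
                continue;
            verdict = ops[i].hook(skb, state);
            if (verdict != NF_ACCEPT)
                return verdict;
        }
        return NF_ACCEPT;
    }

    int main(void)
    {
        const struct nf_hook_ops table[] = {
            { -200, accept_all },
            {  100, drop_big   },
        };
        struct sk_buff skb = { 1500 };
        struct nf_hook_state state = { .thresh = INT_MIN, .hook = 2 };

        printf("verdict=%u (0=drop, 1=accept)\n",
               iterate(table, 2, &skb, &state));
        return 0;
    }
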
index 758b002130d92f0e7fecf63627d426289cf03820..380ef5148ea11ef29b5e3ab8113b94905fbea389 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/netlink.h>
 
 #include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter/ipset/pfxlen.h>
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
@@ -211,6 +212,22 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
 #define HKEY_DATALEN   sizeof(struct hash_netiface4_elem_hashed)
 #include "ip_set_hash_gen.h"
 
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+static const char *get_physindev_name(const struct sk_buff *skb)
+{
+       struct net_device *dev = nf_bridge_get_physindev(skb);
+
+       return dev ? dev->name : NULL;
+}
+
+static const char *get_phyoutdev_name(const struct sk_buff *skb)
+{
+       struct net_device *dev = nf_bridge_get_physoutdev(skb);
+
+       return dev ? dev->name : NULL;
+}
+#endif
+
 static int
 hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
@@ -234,16 +251,15 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        e.ip &= ip_set_netmask(e.cidr);
 
 #define IFACE(dir)     (par->dir ? par->dir->name : NULL)
-#define PHYSDEV(dir)   (nf_bridge->dir ? nf_bridge->dir->name : NULL)
 #define SRCDIR         (opt->flags & IPSET_DIM_TWO_SRC)
 
        if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-               const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+               e.iface = SRCDIR ? get_physindev_name(skb) :
+                                  get_phyoutdev_name(skb);
 
-               if (!nf_bridge)
+               if (!e.iface)
                        return -EINVAL;
-               e.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
                e.physdev = 1;
 #else
                e.iface = NULL;
@@ -476,11 +492,11 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
 
        if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-               const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
-               if (!nf_bridge)
+               e.iface = SRCDIR ? get_physindev_name(skb) :
+                                  get_phyoutdev_name(skb);
+               if (!e.iface)
                        return -EINVAL;
-               e.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
+
                e.physdev = 1;
 #else
                e.iface = NULL;
index 04dbd9c7213fe86e1993e009372b6838127e4688..5d2b806a862e6834ff6c61aee5c0e0a899bbe4b8 100644 (file)
@@ -1272,8 +1272,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
  */
 static unsigned int
 ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-            const struct net_device *in, const struct net_device *out,
-            int (*okfn)(struct sk_buff *))
+            const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
@@ -1284,8 +1283,7 @@ ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                  const struct net_device *in, const struct net_device *out,
-                  int (*okfn)(struct sk_buff *))
+                  const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
@@ -1299,8 +1297,7 @@ ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-            const struct net_device *in, const struct net_device *out,
-            int (*okfn)(struct sk_buff *))
+            const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
@@ -1311,8 +1308,7 @@ ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                  const struct net_device *in, const struct net_device *out,
-                  int (*okfn)(struct sk_buff *))
+                  const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
@@ -1769,9 +1765,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
  */
 static unsigned int
 ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
@@ -1782,8 +1776,7 @@ ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
@@ -1796,9 +1789,7 @@ ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
@@ -1809,8 +1800,7 @@ ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
@@ -1829,8 +1819,7 @@ ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                  const struct net_device *in, const struct net_device *out,
-                  int (*okfn)(struct sk_buff *))
+                  const struct nf_hook_state *state)
 {
        int r;
        struct net *net;
@@ -1851,8 +1840,7 @@ ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
 #ifdef CONFIG_IP_VS_IPV6
 static unsigned int
 ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in, const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        int r;
        struct net *net;
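
Editor's note: the ip_vs_core.c hunks above convert every IPVS hook entry point from the old per-argument prototype (input/output devices plus an okfn callback) to a single const struct nf_hook_state pointer. A minimal hedged sketch of the converted hook shape, using only state fields that appear elsewhere in this series (in, out, hook); example_hook is hypothetical and not part of the patch:

/* Sketch only, not part of the patch: illustrates the new hook prototype. */
static unsigned int example_hook(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct nf_hook_state *state)
{
        /* the devices formerly passed as 'in'/'out' now live in the state */
        if (state->in)
                pr_debug("in=%s\n", state->in->name);
        if (state->out)
                pr_debug("out=%s\n", state->out->name);
        return NF_ACCEPT;
}
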
index bf02932b7188d71ea0c0f1ea438c1b33766b7979..19986ec5f21addd110ee6deadc72398641b61998 100644 (file)
@@ -536,8 +536,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
                ip_vs_update_conntrack(skb, cp, 1);
        if (!local) {
                skb_forward_csum(skb);
-               NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
-                       dst_output);
+               NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
+                       NULL, skb_dst(skb)->dev, dst_output_sk);
        } else
                ret = NF_ACCEPT;
        return ret;
@@ -554,8 +554,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
                ip_vs_notrack(skb);
        if (!local) {
                skb_forward_csum(skb);
-               NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
-                       dst_output);
+               NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
+                       NULL, skb_dst(skb)->dev, dst_output_sk);
        } else
                ret = NF_ACCEPT;
        return ret;
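
Editor's note: the two ip_vs_xmit.c hunks show the matching caller-side change. NF_HOOK() now takes a socket argument before the skb, and the final okfn apparently takes (sk, skb), as the nf_queue.c hunk below suggests, hence dst_output is replaced by dst_output_sk. A hedged sketch of the updated call shape as used here, with 'pf' and 'skb' assumed in scope:

/* Sketch of the rewritten NF_HOOK invocation; the socket slot is NULL here. */
NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL /* sk */, skb,
        NULL /* in */, skb_dst(skb)->dev /* out */, dst_output_sk);
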
index 61a3c927e63cf1c9f0f9b596155e5c8a74bcd71a..ea7f36784b3dae0d34121107607b0ca8763ff411 100644 (file)
 
 /* core.c */
 unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
-                       unsigned int hook, const struct net_device *indev,
-                       const struct net_device *outdev,
-                       struct nf_hook_ops **elemp,
-                       int (*okfn)(struct sk_buff *), int hook_thresh);
+                       struct nf_hook_state *state, struct nf_hook_ops **elemp);
 
 /* nf_queue.c */
-int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
-            unsigned int hook, struct net_device *indev,
-            struct net_device *outdev, int (*okfn)(struct sk_buff *),
-            unsigned int queuenum);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
+            struct nf_hook_state *state, unsigned int queuenum);
 int __init netfilter_queue_init(void);
 
 /* nf_log.c */
index 2631876ac55be96aeec77ab1d15f5db8c5a80c49..a5aa5967b8e17b894a798dbf29d07bf12063fc16 100644 (file)
@@ -17,6 +17,7 @@
 #include <net/route.h>
 
 #include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter/xt_LOG.h>
 #include <net/netfilter/nf_log.h>
 
@@ -163,10 +164,10 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
                const struct net_device *physindev;
                const struct net_device *physoutdev;
 
-               physindev = skb->nf_bridge->physindev;
+               physindev = nf_bridge_get_physindev(skb);
                if (physindev && in != physindev)
                        nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
-               physoutdev = skb->nf_bridge->physoutdev;
+               physoutdev = nf_bridge_get_physoutdev(skb);
                if (physoutdev && out != physoutdev)
                        nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
        }
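
Editor's note: nf_log_dump_packet_common() now reads the bridge ports through nf_bridge_get_physindev()/nf_bridge_get_physoutdev() instead of dereferencing skb->nf_bridge directly; judging by the NULL checks at the call sites, the accessors return NULL when no bridge info is attached. A small hedged usage sketch, reusing the nf_log_buf_add() helper already visible above:

/* Sketch, using only the accessors introduced by this series. */
struct net_device *physin = nf_bridge_get_physindev(skb);

if (physin)
        nf_log_buf_add(m, "PHYSIN=%s ", physin->name);
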
index 4c8b68e5fa164fd71b6f613b66b36d52e6717bf4..2e88032cd5ad22fb1e910966167340a1ba8761f6 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/proc_fs.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/seq_file.h>
 #include <linux/rcupdate.h>
 #include <net/protocol.h>
@@ -47,19 +48,25 @@ EXPORT_SYMBOL(nf_unregister_queue_handler);
 
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
+       struct nf_hook_state *state = &entry->state;
+
        /* Release those devices we held, or Alexey will kill me. */
-       if (entry->indev)
-               dev_put(entry->indev);
-       if (entry->outdev)
-               dev_put(entry->outdev);
+       if (state->in)
+               dev_put(state->in);
+       if (state->out)
+               dev_put(state->out);
+       if (state->sk)
+               sock_put(state->sk);
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
-               struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
+               struct net_device *physdev;
 
-               if (nf_bridge->physindev)
-                       dev_put(nf_bridge->physindev);
-               if (nf_bridge->physoutdev)
-                       dev_put(nf_bridge->physoutdev);
+               physdev = nf_bridge_get_physindev(entry->skb);
+               if (physdev)
+                       dev_put(physdev);
+               physdev = nf_bridge_get_physoutdev(entry->skb);
+               if (physdev)
+                       dev_put(physdev);
        }
 #endif
        /* Drop reference to owner of hook which queued us. */
@@ -70,22 +77,25 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
 /* Bump dev refs so they don't vanish while packet is out */
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 {
+       struct nf_hook_state *state = &entry->state;
+
        if (!try_module_get(entry->elem->owner))
                return false;
 
-       if (entry->indev)
-               dev_hold(entry->indev);
-       if (entry->outdev)
-               dev_hold(entry->outdev);
+       if (state->in)
+               dev_hold(state->in);
+       if (state->out)
+               dev_hold(state->out);
+       if (state->sk)
+               sock_hold(state->sk);
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
-               struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
                struct net_device *physdev;
 
-               physdev = nf_bridge->physindev;
+               physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
-               physdev = nf_bridge->physoutdev;
+               physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
@@ -100,12 +110,9 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
  * through nf_reinject().
  */
 int nf_queue(struct sk_buff *skb,
-                     struct nf_hook_ops *elem,
-                     u_int8_t pf, unsigned int hook,
-                     struct net_device *indev,
-                     struct net_device *outdev,
-                     int (*okfn)(struct sk_buff *),
-                     unsigned int queuenum)
+            struct nf_hook_ops *elem,
+            struct nf_hook_state *state,
+            unsigned int queuenum)
 {
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
@@ -121,7 +128,7 @@ int nf_queue(struct sk_buff *skb,
                goto err_unlock;
        }
 
-       afinfo = nf_get_afinfo(pf);
+       afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err_unlock;
 
@@ -134,11 +141,7 @@ int nf_queue(struct sk_buff *skb,
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = elem,
-               .pf     = pf,
-               .hook   = hook,
-               .indev  = indev,
-               .outdev = outdev,
-               .okfn   = okfn,
+               .state  = *state,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };
 
@@ -184,30 +187,29 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        }
 
        if (verdict == NF_ACCEPT) {
-               afinfo = nf_get_afinfo(entry->pf);
+               afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }
 
+       entry->state.thresh = INT_MIN;
+
        if (verdict == NF_ACCEPT) {
        next_hook:
-               verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
-                                    skb, entry->hook,
-                                    entry->indev, entry->outdev, &elem,
-                                    entry->okfn, INT_MIN);
+               verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+                                    skb, &entry->state, &elem);
        }
 
        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
-               entry->okfn(skb);
+               entry->state.okfn(entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
-               err = nf_queue(skb, elem, entry->pf, entry->hook,
-                               entry->indev, entry->outdev, entry->okfn,
-                               verdict >> NF_VERDICT_QBITS);
+               err = nf_queue(skb, elem, &entry->state,
+                              verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
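
Editor's note: after the nf_queue.c changes above, a queued packet's whole hook context travels as an embedded copy of nf_hook_state inside struct nf_queue_entry (entry->state), the reference counting covers state->sk as well, and reinjection calls state.okfn(state.sk, skb). A hedged sketch of reading that context back from an entry, restricted to fields visible in this diff; example_dump_entry is hypothetical:

/* Sketch only; 'entry' is a struct nf_queue_entry as used above. */
static void example_dump_entry(const struct nf_queue_entry *entry)
{
        pr_debug("hook=%u in=%s out=%s\n",
                 entry->state.hook,
                 entry->state.in  ? entry->state.in->name  : "-",
                 entry->state.out ? entry->state.out->name : "-");
}
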
index 5604c2df05d1a40f2e812d18d2a72fd895359100..0b96fa0d64b2f9bf536eed8a0778f36f8aa04508 100644 (file)
@@ -2159,7 +2159,7 @@ nft_select_set_ops(const struct nlattr * const nla[],
        features = 0;
        if (nla[NFTA_SET_FLAGS] != NULL) {
                features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
-               features &= NFT_SET_INTERVAL | NFT_SET_MAP;
+               features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT;
        }
 
        bops       = NULL;
@@ -2216,6 +2216,8 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
        [NFTA_SET_POLICY]               = { .type = NLA_U32 },
        [NFTA_SET_DESC]                 = { .type = NLA_NESTED },
        [NFTA_SET_ID]                   = { .type = NLA_U32 },
+       [NFTA_SET_TIMEOUT]              = { .type = NLA_U64 },
+       [NFTA_SET_GC_INTERVAL]          = { .type = NLA_U32 },
 };
 
 static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
@@ -2366,6 +2368,13 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
                        goto nla_put_failure;
        }
 
+       if (set->timeout &&
+           nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout)))
+               goto nla_put_failure;
+       if (set->gc_int &&
+           nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
+               goto nla_put_failure;
+
        if (set->policy != NFT_SET_POL_PERFORMANCE) {
                if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
                        goto nla_put_failure;
@@ -2578,7 +2587,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        char name[IFNAMSIZ];
        unsigned int size;
        bool create;
-       u32 ktype, dtype, flags, policy;
+       u64 timeout;
+       u32 ktype, dtype, flags, policy, gc_int;
        struct nft_set_desc desc;
        int err;
 
@@ -2605,7 +2615,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (nla[NFTA_SET_FLAGS] != NULL) {
                flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
                if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
-                             NFT_SET_INTERVAL | NFT_SET_MAP))
+                             NFT_SET_INTERVAL | NFT_SET_MAP |
+                             NFT_SET_TIMEOUT))
                        return -EINVAL;
        }
 
@@ -2631,6 +2642,19 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        } else if (flags & NFT_SET_MAP)
                return -EINVAL;
 
+       timeout = 0;
+       if (nla[NFTA_SET_TIMEOUT] != NULL) {
+               if (!(flags & NFT_SET_TIMEOUT))
+                       return -EINVAL;
+               timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT]));
+       }
+       gc_int = 0;
+       if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
+               if (!(flags & NFT_SET_TIMEOUT))
+                       return -EINVAL;
+               gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
+       }
+
        policy = NFT_SET_POL_PERFORMANCE;
        if (nla[NFTA_SET_POLICY] != NULL)
                policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
@@ -2699,6 +2723,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        set->flags = flags;
        set->size  = desc.size;
        set->policy = policy;
+       set->timeout = timeout;
+       set->gc_int = gc_int;
 
        err = ops->init(set, &desc, nla);
        if (err < 0)
@@ -2785,12 +2811,13 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
                return -EBUSY;
 
-       if (set->flags & NFT_SET_MAP) {
+       if (binding->flags & NFT_SET_MAP) {
                /* If the set is already bound to the same chain all
                 * jumps are already validated for that chain.
                 */
                list_for_each_entry(i, &set->bindings, list) {
-                       if (i->chain == binding->chain)
+                       if (binding->flags & NFT_SET_MAP &&
+                           i->chain == binding->chain)
                                goto bind;
                }
 
@@ -2837,6 +2864,18 @@ const struct nft_set_ext_type nft_set_ext_types[] = {
                .len    = sizeof(u8),
                .align  = __alignof__(u8),
        },
+       [NFT_SET_EXT_TIMEOUT]           = {
+               .len    = sizeof(u64),
+               .align  = __alignof__(u64),
+       },
+       [NFT_SET_EXT_EXPIRATION]        = {
+               .len    = sizeof(unsigned long),
+               .align  = __alignof__(unsigned long),
+       },
+       [NFT_SET_EXT_USERDATA]          = {
+               .len    = sizeof(struct nft_userdata),
+               .align  = __alignof__(struct nft_userdata),
+       },
 };
 EXPORT_SYMBOL_GPL(nft_set_ext_types);
 
@@ -2848,6 +2887,9 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
        [NFTA_SET_ELEM_KEY]             = { .type = NLA_NESTED },
        [NFTA_SET_ELEM_DATA]            = { .type = NLA_NESTED },
        [NFTA_SET_ELEM_FLAGS]           = { .type = NLA_U32 },
+       [NFTA_SET_ELEM_TIMEOUT]         = { .type = NLA_U64 },
+       [NFTA_SET_ELEM_USERDATA]        = { .type = NLA_BINARY,
+                                           .len = NFT_USERDATA_MAXLEN },
 };
 
 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -2909,6 +2951,34 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
                         htonl(*nft_set_ext_flags(ext))))
                goto nla_put_failure;
 
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+           nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+                        cpu_to_be64(*nft_set_ext_timeout(ext))))
+               goto nla_put_failure;
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
+               unsigned long expires, now = jiffies;
+
+               expires = *nft_set_ext_expiration(ext);
+               if (time_before(now, expires))
+                       expires -= now;
+               else
+                       expires = 0;
+
+               if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
+                                cpu_to_be64(jiffies_to_msecs(expires))))
+                       goto nla_put_failure;
+       }
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
+               struct nft_userdata *udata;
+
+               udata = nft_set_ext_userdata(ext);
+               if (nla_put(skb, NFTA_SET_ELEM_USERDATA,
+                           udata->len + 1, udata->data))
+                       goto nla_put_failure;
+       }
+
        nla_nest_end(skb, nest);
        return 0;
 
@@ -3128,11 +3198,11 @@ static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
        return trans;
 }
 
-static void *nft_set_elem_init(const struct nft_set *set,
-                              const struct nft_set_ext_tmpl *tmpl,
-                              const struct nft_data *key,
-                              const struct nft_data *data,
-                              gfp_t gfp)
+void *nft_set_elem_init(const struct nft_set *set,
+                       const struct nft_set_ext_tmpl *tmpl,
+                       const struct nft_data *key,
+                       const struct nft_data *data,
+                       u64 timeout, gfp_t gfp)
 {
        struct nft_set_ext *ext;
        void *elem;
@@ -3147,6 +3217,11 @@ static void *nft_set_elem_init(const struct nft_set *set,
        memcpy(nft_set_ext_key(ext), key, set->klen);
        if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
                memcpy(nft_set_ext_data(ext), data, set->dlen);
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
+               *nft_set_ext_expiration(ext) =
+                       jiffies + msecs_to_jiffies(timeout);
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
+               *nft_set_ext_timeout(ext) = timeout;
 
        return elem;
 }
@@ -3172,15 +3247,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        struct nft_set_ext *ext;
        struct nft_set_elem elem;
        struct nft_set_binding *binding;
+       struct nft_userdata *udata;
        struct nft_data data;
        enum nft_registers dreg;
        struct nft_trans *trans;
+       u64 timeout;
        u32 flags;
+       u8 ulen;
        int err;
 
-       if (set->size && set->nelems == set->size)
-               return -ENFILE;
-
        err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
                               nft_set_elem_policy);
        if (err < 0)
@@ -3215,6 +3290,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        return -EINVAL;
        }
 
+       timeout = 0;
+       if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
+               if (!(set->flags & NFT_SET_TIMEOUT))
+                       return -EINVAL;
+               timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT]));
+       } else if (set->flags & NFT_SET_TIMEOUT) {
+               timeout = set->timeout;
+       }
+
        err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
        if (err < 0)
                goto err1;
@@ -3223,6 +3307,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                goto err2;
 
        nft_set_ext_add(&tmpl, NFT_SET_EXT_KEY);
+       if (timeout > 0) {
+               nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
+               if (timeout != set->timeout)
+                       nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+       }
 
        if (nla[NFTA_SET_ELEM_DATA] != NULL) {
                err = nft_data_init(ctx, &data, &d2, nla[NFTA_SET_ELEM_DATA]);
@@ -3241,6 +3330,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                .chain  = (struct nft_chain *)binding->chain,
                        };
 
+                       if (!(binding->flags & NFT_SET_MAP))
+                               continue;
+
                        err = nft_validate_data_load(&bind_ctx, dreg,
                                                     &data, d2.type);
                        if (err < 0)
@@ -3250,20 +3342,38 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                nft_set_ext_add(&tmpl, NFT_SET_EXT_DATA);
        }
 
+       /* The full maximum length of userdata can exceed the maximum
+        * offset value (U8_MAX) for following extensions, therefore it
+        * must be the last extension added.
+        */
+       ulen = 0;
+       if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {
+               ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]);
+               if (ulen > 0)
+                       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
+                                              ulen);
+       }
+
        err = -ENOMEM;
-       elem.priv = nft_set_elem_init(set, &tmpl, &elem.key, &data, GFP_KERNEL);
+       elem.priv = nft_set_elem_init(set, &tmpl, &elem.key, &data,
+                                     timeout, GFP_KERNEL);
        if (elem.priv == NULL)
                goto err3;
 
        ext = nft_set_elem_ext(set, elem.priv);
        if (flags)
                *nft_set_ext_flags(ext) = flags;
+       if (ulen > 0) {
+               udata = nft_set_ext_userdata(ext);
+               udata->len = ulen - 1;
+               nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
+       }
 
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
        if (trans == NULL)
                goto err4;
 
-       ext->genmask = nft_genmask_cur(ctx->net);
+       ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
        err = set->ops->insert(set, &elem);
        if (err < 0)
                goto err5;
@@ -3316,11 +3426,15 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
                return -EBUSY;
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+               if (set->size &&
+                   !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
+                       return -ENFILE;
+
                err = nft_add_set_elem(&ctx, set, attr);
-               if (err < 0)
+               if (err < 0) {
+                       atomic_dec(&set->nelems);
                        break;
-
-               set->nelems++;
+               }
        }
        return err;
 }
@@ -3402,11 +3516,36 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
                if (err < 0)
                        break;
 
-               set->nelems--;
+               set->ndeact++;
        }
        return err;
 }
 
+void nft_set_gc_batch_release(struct rcu_head *rcu)
+{
+       struct nft_set_gc_batch *gcb;
+       unsigned int i;
+
+       gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
+       for (i = 0; i < gcb->head.cnt; i++)
+               nft_set_elem_destroy(gcb->head.set, gcb->elems[i]);
+       kfree(gcb);
+}
+EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
+
+struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+                                               gfp_t gfp)
+{
+       struct nft_set_gc_batch *gcb;
+
+       gcb = kzalloc(sizeof(*gcb), gfp);
+       if (gcb == NULL)
+               return gcb;
+       gcb->head.set = set;
+       return gcb;
+}
+EXPORT_SYMBOL_GPL(nft_set_gc_batch_alloc);
+
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
                                   u32 portid, u32 seq)
 {
@@ -3710,6 +3849,8 @@ static int nf_tables_commit(struct sk_buff *skb)
                                                 &te->elem,
                                                 NFT_MSG_DELSETELEM, 0);
                        te->set->ops->remove(te->set, &te->elem);
+                       atomic_dec(&te->set->nelems);
+                       te->set->ndeact--;
                        break;
                }
        }
@@ -3813,16 +3954,16 @@ static int nf_tables_abort(struct sk_buff *skb)
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
-                       nft_trans_elem_set(trans)->nelems--;
                        te = (struct nft_trans_elem *)trans->data;
 
                        te->set->ops->remove(te->set, &te->elem);
+                       atomic_dec(&te->set->nelems);
                        break;
                case NFT_MSG_DELSETELEM:
                        te = (struct nft_trans_elem *)trans->data;
 
-                       nft_trans_elem_set(trans)->nelems++;
                        te->set->ops->activate(te->set, &te->elem);
+                       te->set->ndeact--;
 
                        nft_trans_destroy(trans);
                        break;
@@ -3960,7 +4101,8 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                        continue;
 
                list_for_each_entry(binding, &set->bindings, list) {
-                       if (binding->chain != chain)
+                       if (!(binding->flags & NFT_SET_MAP) ||
+                           binding->chain != chain)
                                continue;
 
                        iter.skip       = 0;
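
Editor's note: taken together, the nf_tables_api.c changes above add per-set default timeouts and GC intervals (NFTA_SET_TIMEOUT, NFTA_SET_GC_INTERVAL), per-element timeout, expiration and userdata extensions, an atomic nelems/ndeact accounting split, and set bindings that only validate map semantics when the binding actually carries NFT_SET_MAP. Element allocation goes through the now-exported nft_set_elem_init(). A minimal hedged sketch of that construction pattern; example_make_elem is hypothetical, the helpers are the ones used in this series:

/* Hedged sketch of building a timed element; 'set', 'key' and 'data' are
 * assumed to be prepared by the caller. */
static int example_make_elem(struct nft_set *set,
                             const struct nft_data *key,
                             const struct nft_data *data,
                             struct nft_set_elem *elem)
{
        struct nft_set_ext_tmpl tmpl;
        u64 timeout = set->timeout;             /* per-set default, may be 0 */

        nft_set_ext_prepare(&tmpl);
        nft_set_ext_add(&tmpl, NFT_SET_EXT_KEY);
        if (timeout > 0)
                nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);

        elem->priv = nft_set_elem_init(set, &tmpl, key, data,
                                       timeout, GFP_KERNEL);
        return elem->priv ? 0 : -ENOMEM;
}
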
index ef4dfcbaf149f4c207f0096ceb6b8a6c1aa3d924..7caf08a9225d29c3621896c881896ac03765827f 100644 (file)
@@ -239,8 +239,14 @@ int __init nf_tables_core_module_init(void)
        if (err < 0)
                goto err6;
 
+       err = nft_dynset_module_init();
+       if (err < 0)
+               goto err7;
+
        return 0;
 
+err7:
+       nft_payload_module_exit();
 err6:
        nft_byteorder_module_exit();
 err5:
@@ -257,6 +263,7 @@ err1:
 
 void nf_tables_core_module_exit(void)
 {
+       nft_dynset_module_exit();
        nft_payload_module_exit();
        nft_byteorder_module_exit();
        nft_bitwise_module_exit();
index 957b83a0223b8eef159b572a2b685095a2d3e0ab..51afea4b0af78a46c41099cd55ca9506f3636835 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ipv6.h>
 #include <linux/netdevice.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
 #include <net/netlink.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_log.h>
@@ -448,14 +449,18 @@ __build_packet_message(struct nfnl_log_net *log,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
+                       struct net_device *physindev;
+
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;
-                       if (skb->nf_bridge && skb->nf_bridge->physindev &&
+
+                       physindev = nf_bridge_get_physindev(skb);
+                       if (physindev &&
                            nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-                                        htonl(skb->nf_bridge->physindev->ifindex)))
+                                        htonl(physindev->ifindex)))
                                goto nla_put_failure;
                }
 #endif
@@ -479,14 +484,18 @@ __build_packet_message(struct nfnl_log_net *log,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
+                       struct net_device *physoutdev;
+
                        /* Case 2: indev is a bridge group, we need to look
                         * for physical device (when called from ipv4) */
                        if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;
-                       if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
+
+                       physoutdev = nf_bridge_get_physoutdev(skb);
+                       if (physoutdev &&
                            nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-                                        htonl(skb->nf_bridge->physoutdev->ifindex)))
+                                        htonl(physoutdev->ifindex)))
                                goto nla_put_failure;
                }
 #endif
index 86ee8b05adae3bb59f59da371c398fb3bb85d063..628afc350c025f7012fa927c03ec3bdc6b3b6a2c 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/proc_fs.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_queue.h>
 #include <linux/list.h>
@@ -314,13 +315,13 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (entskb->tstamp.tv64)
                size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
-       if (entry->hook <= NF_INET_FORWARD ||
-          (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
+       if (entry->state.hook <= NF_INET_FORWARD ||
+          (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
                csum_verify = !skb_csum_unnecessary(entskb);
        else
                csum_verify = false;
 
-       outdev = entry->outdev;
+       outdev = entry->state.out;
 
        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
@@ -368,23 +369,23 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                return NULL;
        }
        nfmsg = nlmsg_data(nlh);
-       nfmsg->nfgen_family = entry->pf;
+       nfmsg->nfgen_family = entry->state.pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);
 
        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol       = entskb->protocol;
-       pmsg->hook              = entry->hook;
+       pmsg->hook              = entry->state.hook;
        *packet_id_ptr          = &pmsg->packet_id;
 
-       indev = entry->indev;
+       indev = entry->state.in;
        if (indev) {
 #if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
 #else
-               if (entry->pf == PF_BRIDGE) {
+               if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
@@ -396,14 +397,18 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
+                       int physinif;
+
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;
-                       if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
+
+                       physinif = nf_bridge_get_physinif(entskb);
+                       if (physinif &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
-                                        htonl(entskb->nf_bridge->physindev->ifindex)))
+                                        htonl(physinif)))
                                goto nla_put_failure;
                }
 #endif
@@ -414,7 +419,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
 #else
-               if (entry->pf == PF_BRIDGE) {
+               if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
@@ -426,14 +431,18 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
+                       int physoutif;
+
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;
-                       if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
+
+                       physoutif = nf_bridge_get_physoutif(entskb);
+                       if (physoutif &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-                                        htonl(entskb->nf_bridge->physoutdev->ifindex)))
+                                        htonl(physoutif)))
                                goto nla_put_failure;
                }
 #endif
@@ -633,8 +642,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        struct nfqnl_instance *queue;
        struct sk_buff *skb, *segs;
        int err = -ENOBUFS;
-       struct net *net = dev_net(entry->indev ?
-                                 entry->indev : entry->outdev);
+       struct net *net = dev_net(entry->state.in ?
+                                 entry->state.in : entry->state.out);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 
        /* rcu_read_lock()ed by nf_hook_slow() */
@@ -647,7 +656,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
        skb = entry->skb;
 
-       switch (entry->pf) {
+       switch (entry->state.pf) {
        case NFPROTO_IPV4:
                skb->protocol = htons(ETH_P_IP);
                break;
@@ -757,19 +766,20 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
 static int
 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
 {
-       if (entry->indev)
-               if (entry->indev->ifindex == ifindex)
+       if (entry->state.in)
+               if (entry->state.in->ifindex == ifindex)
                        return 1;
-       if (entry->outdev)
-               if (entry->outdev->ifindex == ifindex)
+       if (entry->state.out)
+               if (entry->state.out->ifindex == ifindex)
                        return 1;
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
-               if (entry->skb->nf_bridge->physindev &&
-                   entry->skb->nf_bridge->physindev->ifindex == ifindex)
-                       return 1;
-               if (entry->skb->nf_bridge->physoutdev &&
-                   entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
+               int physinif, physoutif;
+
+               physinif = nf_bridge_get_physinif(entry->skb);
+               physoutif = nf_bridge_get_physoutif(entry->skb);
+
+               if (physinif == ifindex || physoutif == ifindex)
                        return 1;
        }
 #endif
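
Editor's note: dev_cmp() and the netlink attribute fill above switch to the ifindex-returning helpers, which, judging by the checks at their call sites, yield 0 when no bridge port is recorded. A hedged sketch; example_on_bridge_port is hypothetical:

/* Sketch only: matching a packet against a bridge port index with the
 * helpers used above (0 means "not set"). */
static bool example_on_bridge_port(struct sk_buff *skb, int ifindex)
{
        if (ifindex == 0)
                return false;
        return nf_bridge_get_physinif(skb) == ifindex ||
               nf_bridge_get_physoutif(skb) == ifindex;
}
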
index 589b8487cd0840de2bafd01304242265b16ea11f..0d137c1ac889ea2cd8f74aaf71238773855198c0 100644 (file)
@@ -321,11 +321,11 @@ static void nft_match_eval(const struct nft_expr *expr,
                return;
        }
 
-       switch(ret) {
-       case true:
+       switch (ret ? 1 : 0) {
+       case 1:
                data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
                break;
-       case false:
+       case 0:
                data[NFT_REG_VERDICT].verdict = NFT_BREAK;
                break;
        }
index cc5603016242ea8e1f5cdce1d633e3a2687276ae..18d520e0ca0a73cadeb8cb8efa2565e1169e9980 100644 (file)
@@ -56,6 +56,8 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                        state = NF_CT_STATE_BIT(ctinfo);
                dest->data[0] = state;
                return;
+       default:
+               break;
        }
 
        if (ct == NULL)
@@ -117,6 +119,8 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
        }
 #endif
+       default:
+               break;
        }
 
        tuple = &ct->tuplehash[priv->dir].tuple;
@@ -141,6 +145,8 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
        case NFT_CT_PROTO_DST:
                dest->data[0] = (__force __u16)tuple->dst.u.all;
                return;
+       default:
+               break;
        }
        return;
 err:
@@ -172,6 +178,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
                }
                break;
 #endif
+       default:
+               break;
        }
 }
 
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
new file mode 100644 (file)
index 0000000..eeb72de
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2015 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+
+struct nft_dynset {
+       struct nft_set                  *set;
+       struct nft_set_ext_tmpl         tmpl;
+       enum nft_dynset_ops             op:8;
+       enum nft_registers              sreg_key:8;
+       enum nft_registers              sreg_data:8;
+       u64                             timeout;
+       struct nft_set_binding          binding;
+};
+
+static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1])
+{
+       const struct nft_dynset *priv = nft_expr_priv(expr);
+       u64 timeout;
+       void *elem;
+
+       if (set->size && !atomic_add_unless(&set->nelems, 1, set->size))
+               return NULL;
+
+       timeout = priv->timeout ? : set->timeout;
+       elem = nft_set_elem_init(set, &priv->tmpl,
+                                &data[priv->sreg_key], &data[priv->sreg_data],
+                                timeout, GFP_ATOMIC);
+       if (elem == NULL) {
+               if (set->size)
+                       atomic_dec(&set->nelems);
+       }
+       return elem;
+}
+
+static void nft_dynset_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       const struct nft_dynset *priv = nft_expr_priv(expr);
+       struct nft_set *set = priv->set;
+       const struct nft_set_ext *ext;
+       u64 timeout;
+
+       if (set->ops->update(set, &data[priv->sreg_key], nft_dynset_new,
+                            expr, data, &ext)) {
+               if (priv->op == NFT_DYNSET_OP_UPDATE &&
+                   nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
+                       timeout = priv->timeout ? : set->timeout;
+                       *nft_set_ext_expiration(ext) = jiffies + timeout;
+                       return;
+               }
+       }
+
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
+       [NFTA_DYNSET_SET_NAME]  = { .type = NLA_STRING },
+       [NFTA_DYNSET_SET_ID]    = { .type = NLA_U32 },
+       [NFTA_DYNSET_OP]        = { .type = NLA_U32 },
+       [NFTA_DYNSET_SREG_KEY]  = { .type = NLA_U32 },
+       [NFTA_DYNSET_SREG_DATA] = { .type = NLA_U32 },
+       [NFTA_DYNSET_TIMEOUT]   = { .type = NLA_U64 },
+};
+
+static int nft_dynset_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_dynset *priv = nft_expr_priv(expr);
+       struct nft_set *set;
+       u64 timeout;
+       int err;
+
+       if (tb[NFTA_DYNSET_SET_NAME] == NULL ||
+           tb[NFTA_DYNSET_OP] == NULL ||
+           tb[NFTA_DYNSET_SREG_KEY] == NULL)
+               return -EINVAL;
+
+       set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME]);
+       if (IS_ERR(set)) {
+               if (tb[NFTA_DYNSET_SET_ID])
+                       set = nf_tables_set_lookup_byid(ctx->net,
+                                                       tb[NFTA_DYNSET_SET_ID]);
+               if (IS_ERR(set))
+                       return PTR_ERR(set);
+       }
+
+       if (set->flags & NFT_SET_CONSTANT)
+               return -EBUSY;
+
+       priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP]));
+       switch (priv->op) {
+       case NFT_DYNSET_OP_ADD:
+               break;
+       case NFT_DYNSET_OP_UPDATE:
+               if (!(set->flags & NFT_SET_TIMEOUT))
+                       return -EOPNOTSUPP;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       timeout = 0;
+       if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
+               if (!(set->flags & NFT_SET_TIMEOUT))
+                       return -EINVAL;
+               timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+       }
+
+       priv->sreg_key = ntohl(nla_get_be32(tb[NFTA_DYNSET_SREG_KEY]));
+       err = nft_validate_input_register(priv->sreg_key);
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
+               if (!(set->flags & NFT_SET_MAP))
+                       return -EINVAL;
+               if (set->dtype == NFT_DATA_VERDICT)
+                       return -EOPNOTSUPP;
+
+               priv->sreg_data = ntohl(nla_get_be32(tb[NFTA_DYNSET_SREG_DATA]));
+               err = nft_validate_input_register(priv->sreg_data);
+               if (err < 0)
+                       return err;
+       } else if (set->flags & NFT_SET_MAP)
+               return -EINVAL;
+
+       nft_set_ext_prepare(&priv->tmpl);
+       nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen);
+       if (set->flags & NFT_SET_MAP)
+               nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_DATA, set->dlen);
+       if (set->flags & NFT_SET_TIMEOUT) {
+               if (timeout || set->timeout)
+                       nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+       }
+
+       priv->timeout = timeout;
+
+       err = nf_tables_bind_set(ctx, set, &priv->binding);
+       if (err < 0)
+               return err;
+
+       priv->set = set;
+       return 0;
+}
+
+static void nft_dynset_destroy(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr)
+{
+       struct nft_dynset *priv = nft_expr_priv(expr);
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
+static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_dynset *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_DYNSET_SREG_KEY, htonl(priv->sreg_key)))
+               goto nla_put_failure;
+       if (priv->set->flags & NFT_SET_MAP &&
+           nla_put_be32(skb, NFTA_DYNSET_SREG_DATA, htonl(priv->sreg_data)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_DYNSET_OP, htonl(priv->op)))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_dynset_type;
+static const struct nft_expr_ops nft_dynset_ops = {
+       .type           = &nft_dynset_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_dynset)),
+       .eval           = nft_dynset_eval,
+       .init           = nft_dynset_init,
+       .destroy        = nft_dynset_destroy,
+       .dump           = nft_dynset_dump,
+};
+
+static struct nft_expr_type nft_dynset_type __read_mostly = {
+       .name           = "dynset",
+       .ops            = &nft_dynset_ops,
+       .policy         = nft_dynset_policy,
+       .maxattr        = NFTA_DYNSET_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_dynset_module_init(void)
+{
+       return nft_register_expr(&nft_dynset_type);
+}
+
+void nft_dynset_module_exit(void)
+{
+       nft_unregister_expr(&nft_dynset_type);
+}
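
Editor's note: nft_dynset, added above, lets a rule add or refresh set elements from the packet path. It builds an element from registers via nft_set_elem_init() and hands it to the set backend's new ->update() callback; with NFT_DYNSET_OP_UPDATE it also pushes out the expiration of an element that already exists. The callback contract, as implemented by nft_hash_update() in the hash backend diff that follows, is roughly this (hedged sketch of the signature only):

/* ->update() member of struct nft_set_ops, as used by nft_dynset_eval():
 * look the key up, create the element via new() on a miss, and return the
 * element's extension area through 'ext'; false means the element could
 * not be added (for example because the set is full). */
bool (*update)(struct nft_set *set, const struct nft_data *key,
               void *(*new)(struct nft_set *, const struct nft_expr *,
                            struct nft_data []),
               const struct nft_expr *expr, struct nft_data data[],
               const struct nft_set_ext **ext);
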
index c7e1a9d7d46f515c9ef80f67d8fffe630ddafb01..bc23806b7fbef29005dbb9d4adc35ae4d76ff16e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/log2.h>
 #include <linux/jhash.h>
 #include <linux/netlink.h>
+#include <linux/workqueue.h>
 #include <linux/rhashtable.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
@@ -25,6 +26,7 @@
 
 struct nft_hash {
        struct rhashtable               ht;
+       struct delayed_work             gc_work;
 };
 
 struct nft_hash_elem {
@@ -62,6 +64,8 @@ static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
 
        if (nft_data_cmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
                return 1;
+       if (nft_set_elem_expired(&he->ext))
+               return 1;
        if (!nft_set_elem_active(&he->ext, x->genmask))
                return 1;
        return 0;
@@ -86,6 +90,42 @@ static bool nft_hash_lookup(const struct nft_set *set,
        return !!he;
 }
 
+static bool nft_hash_update(struct nft_set *set, const struct nft_data *key,
+                           void *(*new)(struct nft_set *,
+                                        const struct nft_expr *,
+                                        struct nft_data []),
+                           const struct nft_expr *expr,
+                           struct nft_data data[],
+                           const struct nft_set_ext **ext)
+{
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
+       struct nft_hash_cmp_arg arg = {
+               .genmask = NFT_GENMASK_ANY,
+               .set     = set,
+               .key     = key,
+       };
+
+       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+       if (he != NULL)
+               goto out;
+
+       he = new(set, expr, data);
+       if (he == NULL)
+               goto err1;
+       if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
+                                        nft_hash_params))
+               goto err2;
+out:
+       *ext = &he->ext;
+       return true;
+
+err2:
+       nft_set_elem_destroy(set, he);
+err1:
+       return false;
+}
+
 static int nft_hash_insert(const struct nft_set *set,
                           const struct nft_set_elem *elem)
 {
@@ -107,6 +147,7 @@ static void nft_hash_activate(const struct nft_set *set,
        struct nft_hash_elem *he = elem->priv;
 
        nft_set_elem_change_active(set, &he->ext);
+       nft_set_elem_clear_busy(&he->ext);
 }
 
 static void *nft_hash_deactivate(const struct nft_set *set,
@@ -120,9 +161,15 @@ static void *nft_hash_deactivate(const struct nft_set *set,
                .key     = &elem->key,
        };
 
+       rcu_read_lock();
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
-       if (he != NULL)
-               nft_set_elem_change_active(set, &he->ext);
+       if (he != NULL) {
+               if (!nft_set_elem_mark_busy(&he->ext))
+                       nft_set_elem_change_active(set, &he->ext);
+               else
+                       he = NULL;
+       }
+       rcu_read_unlock();
 
        return he;
 }
@@ -170,6 +217,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
 
                if (iter->count < iter->skip)
                        goto cont;
+               if (nft_set_elem_expired(&he->ext))
+                       goto cont;
                if (!nft_set_elem_active(&he->ext, genmask))
                        goto cont;
 
@@ -188,6 +237,55 @@ out:
        rhashtable_walk_exit(&hti);
 }
 
+static void nft_hash_gc(struct work_struct *work)
+{
+       struct nft_set *set;
+       struct nft_hash_elem *he;
+       struct nft_hash *priv;
+       struct nft_set_gc_batch *gcb = NULL;
+       struct rhashtable_iter hti;
+       int err;
+
+       priv = container_of(work, struct nft_hash, gc_work.work);
+       set  = nft_set_container_of(priv);
+
+       err = rhashtable_walk_init(&priv->ht, &hti);
+       if (err)
+               goto schedule;
+
+       err = rhashtable_walk_start(&hti);
+       if (err && err != -EAGAIN)
+               goto out;
+
+       while ((he = rhashtable_walk_next(&hti))) {
+               if (IS_ERR(he)) {
+                       if (PTR_ERR(he) != -EAGAIN)
+                               goto out;
+                       continue;
+               }
+
+               if (!nft_set_elem_expired(&he->ext))
+                       continue;
+               if (nft_set_elem_mark_busy(&he->ext))
+                       continue;
+
+               gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+               if (gcb == NULL)
+                       goto out;
+               rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
+               atomic_dec(&set->nelems);
+               nft_set_gc_batch_add(gcb, he);
+       }
+out:
+       rhashtable_walk_stop(&hti);
+       rhashtable_walk_exit(&hti);
+
+       nft_set_gc_batch_complete(gcb);
+schedule:
+       queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+                          nft_set_gc_interval(set));
+}
+
 static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 {
        return sizeof(struct nft_hash);
@@ -207,11 +305,20 @@ static int nft_hash_init(const struct nft_set *set,
 {
        struct nft_hash *priv = nft_set_priv(set);
        struct rhashtable_params params = nft_hash_params;
+       int err;
 
        params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
        params.key_len    = set->klen;
 
-       return rhashtable_init(&priv->ht, &params);
+       err = rhashtable_init(&priv->ht, &params);
+       if (err < 0)
+               return err;
+
+       INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
+       if (set->flags & NFT_SET_TIMEOUT)
+               queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+                                  nft_set_gc_interval(set));
+       return 0;
 }
 
 static void nft_hash_elem_destroy(void *ptr, void *arg)
@@ -223,6 +330,7 @@ static void nft_hash_destroy(const struct nft_set *set)
 {
        struct nft_hash *priv = nft_set_priv(set);
 
+       cancel_delayed_work_sync(&priv->gc_work);
        rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
                                    (void *)set);
 }
@@ -263,8 +371,9 @@ static struct nft_set_ops nft_hash_ops __read_mostly = {
        .deactivate     = nft_hash_deactivate,
        .remove         = nft_hash_remove,
        .lookup         = nft_hash_lookup,
+       .update         = nft_hash_update,
        .walk           = nft_hash_walk,
-       .features       = NFT_SET_MAP,
+       .features       = NFT_SET_MAP | NFT_SET_TIMEOUT,
        .owner          = THIS_MODULE,
 };
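
Editor's note: the hash backend above grows a deferrable GC work item on system_power_efficient_wq that walks the rhashtable, skips busy elements, and frees expired ones through the nft_set_gc_batch helpers (the alloc/release halves are added in nf_tables_api.c above; check/add/complete are presumably inline wrappers around them). A hedged sketch of the batching idiom used by nft_hash_gc(); example_gc_one is hypothetical:

/* Sketch of batching one expired element for destruction. */
static void example_gc_one(struct nft_set *set, struct nft_hash_elem *he)
{
        struct nft_set_gc_batch *gcb = NULL;

        gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
        if (gcb == NULL)
                return;                 /* try again on the next GC run */
        atomic_dec(&set->nelems);
        nft_set_gc_batch_add(gcb, he);
        nft_set_gc_batch_complete(gcb); /* frees the batch, via RCU judging
                                         * by nft_set_gc_batch_release() */
}
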
 
index a5f30b8760eab5aa476f0afc13fe0e8686c9ff47..d8cf86fb30fc33fdf657a03da1320f331bc38c9a 100644 (file)
@@ -92,6 +92,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
        } else if (set->flags & NFT_SET_MAP)
                return -EINVAL;
 
+       priv->binding.flags = set->flags & NFT_SET_MAP;
+
        err = nf_tables_bind_set(ctx, set, &priv->binding);
        if (err < 0)
                return err;
index 5197874372ec4a2055a3f9251f3a3ec248f53fbb..d79ce88be77f3568aa9e6409f7de20242d9c343f 100644 (file)
@@ -166,9 +166,8 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                dest->data[0] = out->group;
                break;
        case NFT_META_CGROUP:
-               if (skb->sk == NULL)
-                       break;
-
+               if (skb->sk == NULL || !sk_fullsock(skb->sk))
+                       goto err;
                dest->data[0] = skb->sk->sk_classid;
                break;
        default:
index 7198d660b4dea1e9e79c6f9a13f4e6669bca569d..a1d126f2946305a10ccc04ce92e469b1255f60f9 100644 (file)
@@ -39,7 +39,7 @@ cgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_cgroup_info *info = par->matchinfo;
 
-       if (skb->sk == NULL)
+       if (skb->sk == NULL || !sk_fullsock(skb->sk))
                return false;
 
        return (info->id == skb->sk->sk_classid) ^ info->invert;
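
Editor's note: both the nft_meta NFT_META_CGROUP path and the xt_cgroup match above now require a full socket before touching sk_classid, since request and timewait mini-sockets do not carry it. Hedged restatement of the added condition (the nft_meta path jumps to its error label instead of returning false):

/* The guard shared by both hunks above. */
if (skb->sk == NULL || !sk_fullsock(skb->sk))
        return false;   /* only full sockets carry sk_classid */
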
index 50a52043650fd95989eb6618a36cbb8dba0f6b18..1caaccbc306c7751f717aa1e223cf2ef30f00bec 100644 (file)
@@ -25,16 +25,15 @@ MODULE_ALIAS("ip6t_physdev");
 static bool
 physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
-       static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
        const struct xt_physdev_info *info = par->matchinfo;
+       const struct net_device *physdev;
        unsigned long ret;
        const char *indev, *outdev;
-       const struct nf_bridge_info *nf_bridge;
 
        /* Not a bridged IP packet or no info available yet:
         * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
         * the destination device will be a bridge. */
-       if (!(nf_bridge = skb->nf_bridge)) {
+       if (!skb->nf_bridge) {
                /* Return MATCH if the invert flags of the used options are on */
                if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
                    !(info->invert & XT_PHYSDEV_OP_BRIDGED))
@@ -54,30 +53,41 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
                return true;
        }
 
+       physdev = nf_bridge_get_physoutdev(skb);
+       outdev = physdev ? physdev->name : NULL;
+
        /* This only makes sense in the FORWARD and POSTROUTING chains */
        if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
-           (!!nf_bridge->physoutdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+           (!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
                return false;
 
+       physdev = nf_bridge_get_physindev(skb);
+       indev = physdev ? physdev->name : NULL;
+
        if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
-           (!nf_bridge->physindev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
+           (!indev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
            (info->bitmask & XT_PHYSDEV_OP_ISOUT &&
-           (!nf_bridge->physoutdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
+           (!outdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
                return false;
 
        if (!(info->bitmask & XT_PHYSDEV_OP_IN))
                goto match_outdev;
-       indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
-       ret = ifname_compare_aligned(indev, info->physindev, info->in_mask);
 
-       if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
-               return false;
+       if (indev) {
+               ret = ifname_compare_aligned(indev, info->physindev,
+                                            info->in_mask);
+
+               if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
+                       return false;
+       }
 
 match_outdev:
        if (!(info->bitmask & XT_PHYSDEV_OP_OUT))
                return true;
-       outdev = nf_bridge->physoutdev ?
-                nf_bridge->physoutdev->name : nulldevname;
+
+       if (!outdev)
+               return false;
+
        ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask);
 
        return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
index 895534e87a47a5bb3c4452f93a019831c43754ca..e092cb04632607f21f253d84a8692681a1fefe89 100644 (file)
@@ -143,13 +143,10 @@ static bool xt_socket_sk_is_transparent(struct sock *sk)
        }
 }
 
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
-            const struct xt_socket_mtinfo1 *info)
+static struct sock *xt_socket_lookup_slow_v4(const struct sk_buff *skb,
+                                            const struct net_device *indev)
 {
        const struct iphdr *iph = ip_hdr(skb);
-       struct udphdr _hdr, *hp = NULL;
-       struct sock *sk = skb->sk;
        __be32 uninitialized_var(daddr), uninitialized_var(saddr);
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        u8 uninitialized_var(protocol);
@@ -159,10 +156,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 #endif
 
        if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
+               struct udphdr _hdr, *hp;
+
                hp = skb_header_pointer(skb, ip_hdrlen(skb),
                                        sizeof(_hdr), &_hdr);
                if (hp == NULL)
-                       return false;
+                       return NULL;
 
                protocol = iph->protocol;
                saddr = iph->saddr;
@@ -172,16 +171,17 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 
        } else if (iph->protocol == IPPROTO_ICMP) {
                if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
-                                       &sport, &dport))
-                       return false;
+                                        &sport, &dport))
+                       return NULL;
        } else {
-               return false;
+               return NULL;
        }
 
 #ifdef XT_SOCKET_HAVE_CONNTRACK
-       /* Do the lookup with the original socket address in case this is a
-        * reply packet of an established SNAT-ted connection. */
-
+       /* Do the lookup with the original socket address in
+        * case this is a reply packet of an established
+        * SNAT-ted connection.
+        */
        ct = nf_ct_get(skb, &ctinfo);
        if (ct && !nf_ct_is_untracked(ct) &&
            ((iph->protocol != IPPROTO_ICMP &&
@@ -197,10 +197,18 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        }
 #endif
 
+       return xt_socket_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr,
+                                    sport, dport, indev);
+}
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+            const struct xt_socket_mtinfo1 *info)
+{
+       struct sock *sk = skb->sk;
+
        if (!sk)
-               sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
-                                          saddr, daddr, sport, dport,
-                                          par->in);
+               sk = xt_socket_lookup_slow_v4(skb, par->in);
        if (sk) {
                bool wildcard;
                bool transparent = true;
@@ -225,12 +233,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                        sk = NULL;
        }
 
-       pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
-                protocol, &saddr, ntohs(sport),
-                &daddr, ntohs(dport),
-                &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
-       return (sk != NULL);
+       return sk != NULL;
 }
 
 static bool
@@ -327,28 +330,26 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
        return NULL;
 }
 
-static bool
-socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+static struct sock *xt_socket_lookup_slow_v6(const struct sk_buff *skb,
+                                            const struct net_device *indev)
 {
-       struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
-       struct udphdr _hdr, *hp = NULL;
-       struct sock *sk = skb->sk;
-       const struct in6_addr *daddr = NULL, *saddr = NULL;
        __be16 uninitialized_var(dport), uninitialized_var(sport);
-       int thoff = 0, uninitialized_var(tproto);
-       const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+       const struct in6_addr *daddr = NULL, *saddr = NULL;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
+       int thoff = 0, tproto;
 
        tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
        if (tproto < 0) {
                pr_debug("unable to find transport header in IPv6 packet, dropping\n");
-               return NF_DROP;
+               return NULL;
        }
 
        if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
-               hp = skb_header_pointer(skb, thoff,
-                                       sizeof(_hdr), &_hdr);
+               struct udphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
                if (hp == NULL)
-                       return false;
+                       return NULL;
 
                saddr = &iph->saddr;
                sport = hp->source;
@@ -356,17 +357,27 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
                dport = hp->dest;
 
        } else if (tproto == IPPROTO_ICMPV6) {
+               struct ipv6hdr ipv6_var;
+
                if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
                                         &sport, &dport, &ipv6_var))
-                       return false;
+                       return NULL;
        } else {
-               return false;
+               return NULL;
        }
 
+       return xt_socket_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr,
+                                    sport, dport, indev);
+}
+
+static bool
+socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+       struct sock *sk = skb->sk;
+
        if (!sk)
-               sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
-                                          saddr, daddr, sport, dport,
-                                          par->in);
+               sk = xt_socket_lookup_slow_v6(skb, par->in);
        if (sk) {
                bool wildcard;
                bool transparent = true;
@@ -391,13 +402,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
                        sk = NULL;
        }
 
-       pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
-                "(orig %pI6:%hu) sock %p\n",
-                tproto, saddr, ntohs(sport),
-                daddr, ntohs(dport),
-                &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
-       return (sk != NULL);
+       return sk != NULL;
 }
 #endif
 
index 9575a1892607c5dbf18f5bc04f531ed826dfe812..49ff321060809a320966d5efc24ea6aaaab1add8 100644 (file)
@@ -907,6 +907,16 @@ static int nci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
        return 0;
 }
 
+static int nci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
+{
+       struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+       if (!ndev->ops->fw_download)
+               return -ENOTSUPP;
+
+       return ndev->ops->fw_download(ndev, firmware_name);
+}
+
 static struct nfc_ops nci_nfc_ops = {
        .dev_up = nci_dev_up,
        .dev_down = nci_dev_down,
@@ -922,6 +932,7 @@ static struct nfc_ops nci_nfc_ops = {
        .disable_se = nci_disable_se,
        .discover_se = nci_discover_se,
        .se_io = nci_se_io,
+       .fw_download = nci_fw_download,
 };
 
 /* ---- Interface to NCI drivers ---- */
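
nci_fw_download() above is a thin dispatcher: it returns -ENOTSUPP when the driver provides no fw_download callback and otherwise forwards the firmware name to it. A hedged sketch of the driver side follows; only the .fw_download field and its (struct nci_dev *, const char *) signature come from the hunk, while the driver name, the request_firmware() flow and the device passed to it are assumptions.

#include <linux/firmware.h>
#include <net/nfc/nci_core.h>

/* Illustrative driver hook that the nci_fw_download() wrapper would call. */
static int example_nci_fw_download(struct nci_dev *ndev, const char *fw_name)
{
        const struct firmware *fw;
        int r;

        r = request_firmware(&fw, fw_name, &ndev->nfc_dev->dev);
        if (r < 0)
                return r;

        /* ...stream fw->data / fw->size to the controller here... */

        release_firmware(fw);
        return 0;
}

static struct nci_ops example_nci_ops = {
        /* .open, .close, .send and friends omitted for brevity */
        .fw_download = example_nci_fw_download,
};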
index 3277a7520e31320479e0d40bbd66b6dd6f89de59..6d39766e7828c4351a004fa8ce7d839348c3681d 100644 (file)
@@ -222,7 +222,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
        struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+       struct sock *sk = vxlan_port->vs->sock->sk;
+       __be16 dst_port = inet_sk(sk)->inet_sport;
        const struct ovs_key_ipv4_tunnel *tun_key;
        struct vxlan_metadata md = {0};
        struct rtable *rt;
@@ -255,7 +256,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        vxflags = vxlan_port->exts |
                      (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
 
-       err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst,
+       err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
                             tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
                             src_port, dst_port,
                             &md, false, vxflags);
index ec2954ffc690c612eb1b04b018134ba0f52ba5c8..067a3fff1d2cb0c629c1dc2d75d0353b9269ba71 100644 (file)
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
        ASSERT_OVSL();
 
        hlist_del_rcu(&vport->hash_node);
-
-       vport->ops->destroy(vport);
-
        module_put(vport->ops->owner);
+       vport->ops->destroy(vport);
 }
 
 /**
index dfcea20e31711288aea660add30248b442769979..f377702d4b9185762293a7251f2574a9e72515eb 100644 (file)
@@ -8,7 +8,7 @@
  *     as published by the Free Software Foundation; either version
  *     2 of the License, or (at your option) any later version.
  *
- *  Meant to be mostly used for localy generated traffic :
+ *  Meant to be mostly used for locally generated traffic :
  *  Fast classification depends on skb->sk being set before reaching us.
  *  If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
  *  All packets belonging to a socket are considered as a 'flow'.
@@ -63,7 +63,7 @@ struct fq_flow {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
-       struct rb_node  fq_node;        /* anchor in fq_root[] trees */
+       struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
index 612aa73bbc60c990a320054e4bd7d2846575428e..e6ce1517367f884608640b2532080ab6566b9379 100644 (file)
@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt,
        struct super_block *pipefs_sb;
        int err;
 
-       err = rpc_clnt_debugfs_register(clnt);
-       if (err)
-               return err;
+       rpc_clnt_debugfs_register(clnt);
 
        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
index e811f390f9f67ceb2e897ee8da79189417eacc75..82962f7e6e888f619ad79754f038732d5d5b6333 100644 (file)
@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = {
        .release        = tasks_release,
 };
 
-int
+void
 rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
 {
-       int len, err;
+       int len;
        char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */
+       struct rpc_xprt *xprt;
 
        /* Already registered? */
-       if (clnt->cl_debugfs)
-               return 0;
+       if (clnt->cl_debugfs || !rpc_clnt_dir)
+               return;
 
        len = snprintf(name, sizeof(name), "%x", clnt->cl_clid);
        if (len >= sizeof(name))
-               return -EINVAL;
+               return;
 
        /* make the per-client dir */
        clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir);
        if (!clnt->cl_debugfs)
-               return -ENOMEM;
+               return;
 
        /* make tasks file */
-       err = -ENOMEM;
        if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs,
                                 clnt, &tasks_fops))
                goto out_err;
 
-       err = -EINVAL;
        rcu_read_lock();
+       xprt = rcu_dereference(clnt->cl_xprt);
+       /* no "debugfs" dentry? Don't bother with the symlink. */
+       if (!xprt->debugfs) {
+               rcu_read_unlock();
+               return;
+       }
        len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
-                       rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name);
+                       xprt->debugfs->d_name.name);
        rcu_read_unlock();
+
        if (len >= sizeof(name))
                goto out_err;
 
-       err = -ENOMEM;
        if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name))
                goto out_err;
 
-       return 0;
+       return;
 out_err:
        debugfs_remove_recursive(clnt->cl_debugfs);
        clnt->cl_debugfs = NULL;
-       return err;
 }
 
 void
@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = {
        .release        = xprt_info_release,
 };
 
-int
+void
 rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
 {
        int len, id;
        static atomic_t cur_id;
        char            name[9]; /* 8 hex digits + NULL term */
 
+       if (!rpc_xprt_dir)
+               return;
+
        id = (unsigned int)atomic_inc_return(&cur_id);
 
        len = snprintf(name, sizeof(name), "%x", id);
        if (len >= sizeof(name))
-               return -EINVAL;
+               return;
 
        /* make the per-client dir */
        xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir);
        if (!xprt->debugfs)
-               return -ENOMEM;
+               return;
 
        /* make tasks file */
        if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs,
                                 xprt, &xprt_info_fops)) {
                debugfs_remove_recursive(xprt->debugfs);
                xprt->debugfs = NULL;
-               return -ENOMEM;
        }
-
-       return 0;
 }
 
 void
@@ -266,14 +270,17 @@ void __exit
 sunrpc_debugfs_exit(void)
 {
        debugfs_remove_recursive(topdir);
+       topdir = NULL;
+       rpc_clnt_dir = NULL;
+       rpc_xprt_dir = NULL;
 }
 
-int __init
+void __init
 sunrpc_debugfs_init(void)
 {
        topdir = debugfs_create_dir("sunrpc", NULL);
        if (!topdir)
-               goto out;
+               return;
 
        rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
        if (!rpc_clnt_dir)
@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void)
        if (!rpc_xprt_dir)
                goto out_remove;
 
-       return 0;
+       return;
 out_remove:
        debugfs_remove_recursive(topdir);
        topdir = NULL;
-out:
-       return -ENOMEM;
+       rpc_clnt_dir = NULL;
 }
index e37fbed879568da535aa540656e7b7ace508e2cb..ee5d3d253102bf5d81a39f953248a6a6ca7a38d6 100644 (file)
@@ -98,10 +98,7 @@ init_sunrpc(void)
        if (err)
                goto out4;
 
-       err = sunrpc_debugfs_init();
-       if (err)
-               goto out5;
-
+       sunrpc_debugfs_init();
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        rpc_register_sysctl();
 #endif
@@ -109,8 +106,6 @@ init_sunrpc(void)
        init_socket_xprt();     /* clnt sock transport */
        return 0;
 
-out5:
-       unregister_rpc_pipefs();
 out4:
        unregister_pernet_subsys(&sunrpc_net_ops);
 out3:
index e3015aede0d9443d99eba6b820aed104ab7515a6..9949722d99cebf6afa15953d8a9ac6a5c0bc2824 100644 (file)
@@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
  */
 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
 {
-       int err;
        struct rpc_xprt *xprt;
        struct xprt_class *t;
 
@@ -1372,11 +1371,7 @@ found:
                return ERR_PTR(-ENOMEM);
        }
 
-       err = rpc_xprt_debugfs_register(xprt);
-       if (err) {
-               xprt_destroy(xprt);
-               return ERR_PTR(err);
-       }
+       rpc_xprt_debugfs_register(xprt);
 
        dprintk("RPC:       created transport %p with %u slots\n", xprt,
                        xprt->max_reqs);
index ae558dd7f8eec3f24eca4c301c36ad8da07389df..c5cbdcb1f0b561a22d2f5537f5cca0a80ee36486 100644 (file)
@@ -413,7 +413,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
         */
        if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
-                                    STATE_MSG, 0, 0, 0, 0, 0);
+                                    STATE_MSG, 0, 0, 0, 0);
                tn->bcl->stats.sent_acks++;
        }
 }
@@ -899,7 +899,7 @@ int tipc_bclink_init(struct net *net)
        skb_queue_head_init(&bclink->inputq);
        bcl->owner = &bclink->node;
        bcl->owner->net = net;
-       bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+       bcl->mtu = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
        rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
index 935205e6bcfe6da614fcc6c67e77754dc3a484fa..be1c9fa60b09dc713155c94e7bf6bcc6366fc7aa 100644 (file)
@@ -152,11 +152,11 @@ out_netlink:
 static void __exit tipc_exit(void)
 {
        tipc_bearer_cleanup();
+       unregister_pernet_subsys(&tipc_net_ops);
        tipc_netlink_stop();
        tipc_netlink_compat_stop();
        tipc_socket_stop();
        tipc_unregister_sysctl();
-       unregister_pernet_subsys(&tipc_net_ops);
 
        pr_info("Deactivated\n");
 }
index 514466efc25cd64961c64d39bba2fce6801ff757..a6b30df6ec02ec22f1b4b44930bd1ceb168258f3 100644 (file)
@@ -89,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
 #define  TIMEOUT_EVT     560817u       /* link timer expired */
 
 /*
- * The following two 'message types' is really just implementation
- * data conveniently stored in the message header.
- * They must not be considered part of the protocol
+ * State value stored in 'failover_pkts'
  */
-#define OPEN_MSG   0
-#define CLOSED_MSG 1
-
-/*
- * State value stored in 'exp_msg_count'
- */
-#define START_CHANGEOVER 100000u
+#define FIRST_FAILOVER 0xffffu
 
 static void link_handle_out_of_seq_msg(struct tipc_link *link,
                                       struct sk_buff *skb);
 static void tipc_link_proto_rcv(struct tipc_link *link,
                                struct sk_buff *skb);
-static int  tipc_link_tunnel_rcv(struct tipc_node *node,
-                                struct sk_buff **skb);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -115,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-
+static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
 /*
  *  Simple link routines
  */
@@ -146,34 +136,6 @@ static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
        return l->owner->active_links[1];
 }
 
-static void link_init_max_pkt(struct tipc_link *l_ptr)
-{
-       struct tipc_node *node = l_ptr->owner;
-       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-       struct tipc_bearer *b_ptr;
-       u32 max_pkt;
-
-       rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-       if (!b_ptr) {
-               rcu_read_unlock();
-               return;
-       }
-       max_pkt = (b_ptr->mtu & ~3);
-       rcu_read_unlock();
-
-       if (max_pkt > MAX_MSG_SIZE)
-               max_pkt = MAX_MSG_SIZE;
-
-       l_ptr->max_pkt_target = max_pkt;
-       if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
-               l_ptr->max_pkt = l_ptr->max_pkt_target;
-       else
-               l_ptr->max_pkt = MAX_PKT_DEFAULT;
-
-       l_ptr->max_pkt_probes = 0;
-}
-
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
@@ -314,7 +276,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        msg_set_bearer_id(msg, b_ptr->identity);
        strcpy((char *)msg_data(msg), if_name);
        l_ptr->net_plane = b_ptr->net_plane;
-       link_init_max_pkt(l_ptr);
+       l_ptr->advertised_mtu = b_ptr->mtu;
+       l_ptr->mtu = l_ptr->advertised_mtu;
        l_ptr->priority = b_ptr->priority;
        tipc_link_set_queue_limits(l_ptr, b_ptr->window);
        l_ptr->next_out_no = 1;
@@ -333,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 }
 
 /**
- * link_delete - Conditional deletion of link.
- *               If timer still running, real delete is done when it expires
- * @link: link to be deleted
+ * tipc_link_delete - Delete a link
+ * @l: link to be deleted
  */
-void tipc_link_delete(struct tipc_link *link)
+void tipc_link_delete(struct tipc_link *l)
 {
-       tipc_link_reset_fragments(link);
-       tipc_node_detach_link(link->owner, link);
-       tipc_link_put(link);
+       tipc_link_reset(l);
+       if (del_timer(&l->timer))
+               tipc_link_put(l);
+       l->flags |= LINK_STOPPED;
+       /* Delete link now, or when timer is finished: */
+       tipc_link_reset_fragments(l);
+       tipc_node_detach_link(l->owner, l);
+       tipc_link_put(l);
 }
 
 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
@@ -350,23 +317,12 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *link;
        struct tipc_node *node;
-       bool del_link;
 
        rcu_read_lock();
        list_for_each_entry_rcu(node, &tn->node_list, list) {
                tipc_node_lock(node);
                link = node->links[bearer_id];
-               if (!link) {
-                       tipc_node_unlock(node);
-                       continue;
-               }
-               del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
-               tipc_link_reset(link);
-               if (del_timer(&link->timer))
-                       tipc_link_put(link);
-               link->flags |= LINK_STOPPED;
-               /* Delete link now, or when failover is finished: */
-               if (shutting_down || !tipc_node_is_up(node) || del_link)
+               if (link)
                        tipc_link_delete(link);
                tipc_node_unlock(node);
        }
@@ -473,17 +429,17 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
 void tipc_link_reset(struct tipc_link *l_ptr)
 {
        u32 prev_state = l_ptr->state;
-       u32 checkpoint = l_ptr->next_in_no;
        int was_active_link = tipc_link_is_active(l_ptr);
        struct tipc_node *owner = l_ptr->owner;
+       struct tipc_link *pl = tipc_parallel_link(l_ptr);
 
        msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 
        /* Link is down, accept any session */
        l_ptr->peer_session = INVALID_SESSION;
 
-       /* Prepare for max packet size negotiation */
-       link_init_max_pkt(l_ptr);
+       /* Prepare for renewed mtu size negotiation */
+       l_ptr->mtu = l_ptr->advertised_mtu;
 
        l_ptr->state = RESET_UNKNOWN;
 
@@ -493,11 +449,15 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        tipc_node_link_down(l_ptr->owner, l_ptr);
        tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
 
-       if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
-               l_ptr->reset_checkpoint = checkpoint;
-               l_ptr->exp_msg_count = START_CHANGEOVER;
+       if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
+               l_ptr->flags |= LINK_FAILINGOVER;
+               l_ptr->failover_checkpt = l_ptr->next_in_no;
+               pl->failover_pkts = FIRST_FAILOVER;
+               pl->failover_checkpt = l_ptr->next_in_no;
+               pl->failover_skb = l_ptr->reasm_buf;
+       } else {
+               kfree_skb(l_ptr->reasm_buf);
        }
-
        /* Clean up all queues, except inputq: */
        __skb_queue_purge(&l_ptr->transmq);
        __skb_queue_purge(&l_ptr->deferdq);
@@ -507,6 +467,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        if (!skb_queue_empty(owner->inputq))
                owner->action_flags |= TIPC_MSG_EVT;
        tipc_link_purge_backlog(l_ptr);
+       l_ptr->reasm_buf = NULL;
        l_ptr->rcv_unacked = 0;
        l_ptr->checkpoint = 1;
        l_ptr->next_out_no = 1;
@@ -558,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
                return;         /* Not yet. */
 
-       /* Check whether changeover is going on */
-       if (l_ptr->exp_msg_count) {
+       if (l_ptr->flags & LINK_FAILINGOVER) {
                if (event == TIMEOUT_EVT)
                        link_set_timer(l_ptr, cont_intv);
                return;
@@ -576,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->checkpoint = l_ptr->next_in_no;
                                if (tipc_bclink_acks_missing(l_ptr->owner)) {
                                        tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0, 0);
-                                       l_ptr->fsm_msg_cnt++;
-                               } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
-                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            1, 0, 0, 0, 0);
+                                                            0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
@@ -588,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        }
                        l_ptr->state = WORKING_UNKNOWN;
                        l_ptr->fsm_msg_cnt = 0;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv / 4);
                        break;
@@ -599,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0, 0);
+                                            0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -622,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0, 0);
+                                            0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -633,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->checkpoint = l_ptr->next_in_no;
                                if (tipc_bclink_acks_missing(l_ptr->owner)) {
                                        tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0, 0);
+                                                            0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
                        } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
                                tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                    1, 0, 0, 0, 0);
+                                                    1, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv / 4);
                        } else {        /* Link has failed */
@@ -649,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->state = RESET_UNKNOWN;
                                l_ptr->fsm_msg_cnt = 0;
                                tipc_link_proto_xmit(l_ptr, RESET_MSG,
-                                                    0, 0, 0, 0, 0);
+                                                    0, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv);
                        }
@@ -669,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
                                tipc_link_sync_xmit(l_ptr);
@@ -679,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            1, 0, 0, 0, 0);
+                                            1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -689,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        link_set_timer(l_ptr, cont_intv);
                        break;
                case TIMEOUT_EVT:
-                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -707,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
                                tipc_link_sync_xmit(l_ptr);
@@ -717,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        break;
                case TIMEOUT_EVT:
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0, 0);
+                                            0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -746,7 +702,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
        struct tipc_msg *msg = buf_msg(skb_peek(list));
        unsigned int maxwin = link->window;
        unsigned int imp = msg_importance(msg);
-       uint mtu = link->max_pkt;
+       uint mtu = link->mtu;
        uint ack = mod(link->next_in_no - 1);
        uint seqno = link->next_out_no;
        uint bc_last_in = link->owner->bclink.last_in;
@@ -1200,7 +1156,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
                        link_retrieve_defq(l_ptr, &head);
                if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
                        l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
                }
                tipc_link_input(l_ptr, skb);
                skb = NULL;
@@ -1243,7 +1199,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
                        node->action_flags |= TIPC_NAMED_MSG_EVT;
                return true;
        case MSG_BUNDLER:
-       case CHANGEOVER_PROTOCOL:
+       case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
@@ -1270,12 +1226,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
                return;
 
        switch (msg_user(msg)) {
-       case CHANGEOVER_PROTOCOL:
+       case TUNNEL_PROTOCOL:
                if (msg_dup(msg)) {
                        link->flags |= LINK_SYNCHING;
                        link->synch_point = msg_seqno(msg_get_wrapped(msg));
+                       kfree_skb(skb);
+                       break;
                }
-               if (!tipc_link_tunnel_rcv(node, &skb))
+               if (!tipc_link_failover_rcv(link, &skb))
                        break;
                if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
                        tipc_data_input(link, skb);
@@ -1373,7 +1331,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
        if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
                l_ptr->stats.deferred_recv++;
                if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
        } else {
                l_ptr->stats.duplicates++;
        }
@@ -1383,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
  * Send protocol message to the other endpoint.
  */
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
-                         u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+                         u32 gap, u32 tolerance, u32 priority)
 {
        struct sk_buff *buf = NULL;
        struct tipc_msg *msg = l_ptr->pmsg;
        u32 msg_size = sizeof(l_ptr->proto_msg);
        int r_flag;
 
-       /* Don't send protocol message during link changeover */
-       if (l_ptr->exp_msg_count)
+       /* Don't send protocol message during link failover */
+       if (l_ptr->flags & LINK_FAILINGOVER)
                return;
 
        /* Abort non-RESET send if communication with node is prohibited */
@@ -1421,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
                        l_ptr->stats.sent_nacks++;
                msg_set_link_tolerance(msg, tolerance);
                msg_set_linkprio(msg, priority);
-               msg_set_max_pkt(msg, ack_mtu);
+               msg_set_max_pkt(msg, l_ptr->mtu);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_probe(msg, probe_msg != 0);
-               if (probe_msg) {
-                       u32 mtu = l_ptr->max_pkt;
-
-                       if ((mtu < l_ptr->max_pkt_target) &&
-                           link_working_working(l_ptr) &&
-                           l_ptr->fsm_msg_cnt) {
-                               msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
-                               if (l_ptr->max_pkt_probes == 10) {
-                                       l_ptr->max_pkt_target = (msg_size - 4);
-                                       l_ptr->max_pkt_probes = 0;
-                                       msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
-                               }
-                               l_ptr->max_pkt_probes++;
-                       }
-
+               if (probe_msg)
                        l_ptr->stats.sent_probes++;
-               }
                l_ptr->stats.sent_states++;
        } else {                /* RESET_MSG or ACTIVATE_MSG */
-               msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+               msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
                msg_set_seq_gap(msg, 0);
                msg_set_next_sent(msg, 1);
                msg_set_probe(msg, 0);
                msg_set_link_tolerance(msg, l_ptr->tolerance);
                msg_set_linkprio(msg, l_ptr->priority);
-               msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+               msg_set_max_pkt(msg, l_ptr->advertised_mtu);
        }
 
        r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
@@ -1480,13 +1423,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                                struct sk_buff *buf)
 {
        u32 rec_gap = 0;
-       u32 max_pkt_info;
-       u32 max_pkt_ack;
        u32 msg_tol;
        struct tipc_msg *msg = buf_msg(buf);
 
-       /* Discard protocol message during link changeover */
-       if (l_ptr->exp_msg_count)
+       if (l_ptr->flags & LINK_FAILINGOVER)
                goto exit;
 
        if (l_ptr->net_plane != msg_net_plane(msg))
@@ -1525,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                if (msg_linkprio(msg) > l_ptr->priority)
                        l_ptr->priority = msg_linkprio(msg);
 
-               max_pkt_info = msg_max_pkt(msg);
-               if (max_pkt_info) {
-                       if (max_pkt_info < l_ptr->max_pkt_target)
-                               l_ptr->max_pkt_target = max_pkt_info;
-                       if (l_ptr->max_pkt > l_ptr->max_pkt_target)
-                               l_ptr->max_pkt = l_ptr->max_pkt_target;
-               } else {
-                       l_ptr->max_pkt = l_ptr->max_pkt_target;
-               }
+               if (l_ptr->mtu > msg_max_pkt(msg))
+                       l_ptr->mtu = msg_max_pkt(msg);
 
                /* Synchronize broadcast link info, if not done previously */
                if (!tipc_node_is_up(l_ptr->owner)) {
@@ -1578,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                                      mod(l_ptr->next_in_no));
                }
 
-               max_pkt_ack = msg_max_pkt(msg);
-               if (max_pkt_ack > l_ptr->max_pkt) {
-                       l_ptr->max_pkt = max_pkt_ack;
-                       l_ptr->max_pkt_probes = 0;
-               }
-
-               max_pkt_ack = 0;
-               if (msg_probe(msg)) {
+               if (msg_probe(msg))
                        l_ptr->stats.recv_probes++;
-                       if (msg_size(msg) > sizeof(l_ptr->proto_msg))
-                               max_pkt_ack = msg_size(msg);
-               }
 
                /* Protocol message before retransmits, reduce loss risk */
                if (l_ptr->owner->bclink.recv_permitted)
@@ -1597,8 +1520,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                                                      msg_last_bcast(msg));
 
                if (rec_gap || (msg_probe(msg))) {
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
-                                            0, max_pkt_ack);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
+                                            rec_gap, 0, 0);
                }
                if (msg_seq_gap(msg)) {
                        l_ptr->stats.recv_nacks++;
@@ -1658,8 +1581,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        if (!tunnel)
                return;
 
-       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
-                     ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
+                     FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
        skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
        tipc_link_purge_backlog(l_ptr);
        msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1721,8 +1644,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
        struct sk_buff_head *queue = &link->transmq;
        int mcnt;
 
-       tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL,
-                     DUPLICATE_MSG, INT_H_SIZE, link->addr);
+       tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
+                     SYNCH_MSG, INT_H_SIZE, link->addr);
        mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
        msg_set_msgcnt(&tnl_hdr, mcnt);
        msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
@@ -1755,101 +1678,63 @@ tunnel_queue:
        goto tunnel_queue;
 }
 
-/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
- * Owner node is locked.
- */
-static void tipc_link_dup_rcv(struct tipc_link *link,
-                             struct sk_buff *skb)
-{
-       struct sk_buff *iskb;
-       int pos = 0;
-
-       if (!tipc_link_is_up(link))
-               return;
-
-       if (!tipc_msg_extract(skb, &iskb, &pos)) {
-               pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
-               return;
-       }
-       /* Append buffer to deferred queue, if applicable: */
-       link_handle_out_of_seq_msg(link, iskb);
-}
-
-/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
+/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
  *  Owner node is locked.
  */
-static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
-                                             struct sk_buff *t_buf)
+static bool tipc_link_failover_rcv(struct tipc_link *link,
+                                  struct sk_buff **skb)
 {
-       struct tipc_msg *t_msg = buf_msg(t_buf);
-       struct sk_buff *buf = NULL;
-       struct tipc_msg *msg;
+       struct tipc_msg *msg = buf_msg(*skb);
+       struct sk_buff *iskb = NULL;
+       struct tipc_link *pl = NULL;
+       int bearer_id = msg_bearer_id(msg);
        int pos = 0;
 
-       if (tipc_link_is_up(l_ptr))
-               tipc_link_reset(l_ptr);
-
-       /* First failover packet? */
-       if (l_ptr->exp_msg_count == START_CHANGEOVER)
-               l_ptr->exp_msg_count = msg_msgcnt(t_msg);
-
-       /* Should there be an inner packet? */
-       if (l_ptr->exp_msg_count) {
-               l_ptr->exp_msg_count--;
-               if (!tipc_msg_extract(t_buf, &buf, &pos)) {
-                       pr_warn("%sno inner failover pkt\n", link_co_err);
-                       goto exit;
-               }
-               msg = buf_msg(buf);
-
-               if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
-                       kfree_skb(buf);
-                       buf = NULL;
-                       goto exit;
-               }
-               if (msg_user(msg) == MSG_FRAGMENTER) {
-                       l_ptr->stats.recv_fragments++;
-                       tipc_buf_append(&l_ptr->reasm_buf, &buf);
-               }
+       if (msg_type(msg) != FAILOVER_MSG) {
+               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
+               goto exit;
        }
-exit:
-       if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
-               tipc_link_delete(l_ptr);
-       return buf;
-}
+       if (bearer_id >= MAX_BEARERS)
+               goto exit;
 
-/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
- *  via other link as result of a failover (ORIGINAL_MSG) or
- *  a new active link (DUPLICATE_MSG). Failover packets are
- *  returned to the active link for delivery upwards.
- *  Owner node is locked.
- */
-static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
-                               struct sk_buff **buf)
-{
-       struct sk_buff *t_buf = *buf;
-       struct tipc_link *l_ptr;
-       struct tipc_msg *t_msg = buf_msg(t_buf);
-       u32 bearer_id = msg_bearer_id(t_msg);
+       if (bearer_id == link->bearer_id)
+               goto exit;
 
-       *buf = NULL;
+       pl = link->owner->links[bearer_id];
+       if (pl && tipc_link_is_up(pl))
+               tipc_link_reset(pl);
 
-       if (bearer_id >= MAX_BEARERS)
+       if (link->failover_pkts == FIRST_FAILOVER)
+               link->failover_pkts = msg_msgcnt(msg);
+
+       /* Should we expect an inner packet? */
+       if (!link->failover_pkts)
                goto exit;
 
-       l_ptr = n_ptr->links[bearer_id];
-       if (!l_ptr)
+       if (!tipc_msg_extract(*skb, &iskb, &pos)) {
+               pr_warn("%sno inner failover pkt\n", link_co_err);
+               *skb = NULL;
                goto exit;
+       }
+       link->failover_pkts--;
+       *skb = NULL;
 
-       if (msg_type(t_msg) == DUPLICATE_MSG)
-               tipc_link_dup_rcv(l_ptr, t_buf);
-       else if (msg_type(t_msg) == ORIGINAL_MSG)
-               *buf = tipc_link_failover_rcv(l_ptr, t_buf);
-       else
-               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
+       /* Was this packet already delivered? */
+       if (less(buf_seqno(iskb), link->failover_checkpt)) {
+               kfree_skb(iskb);
+               iskb = NULL;
+               goto exit;
+       }
+       if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
+               link->stats.recv_fragments++;
+               tipc_buf_append(&link->failover_skb, &iskb);
+       }
 exit:
-       kfree_skb(t_buf);
-       return *buf != NULL;
+       if (!link->failover_pkts && pl)
+               pl->flags &= ~LINK_FAILINGOVER;
+       kfree_skb(*skb);
+       *skb = iskb;
+       return *skb;
 }
 
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
@@ -1866,7 +1751,7 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 {
-       int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE);
+       int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
 
        l->window = win;
        l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
@@ -2038,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
                        link_set_supervision_props(link, tol);
-                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
+                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
                        u32 prio;
 
                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
                        link->priority = prio;
-                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
+                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
                }
                if (props[TIPC_NLA_PROP_WIN]) {
                        u32 win;
@@ -2150,7 +2035,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
                        tipc_cluster_mask(tn->own_addr)))
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
+       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
                goto attr_msg_full;
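
The tipc_link hunks above do two things: the max_pkt/max_pkt_target/max_pkt_probes negotiation machinery collapses into a plain mtu/advertised_mtu pair, and the DUPLICATE/ORIGINAL changeover tunnelling is reworked into SYNCH/FAILOVER handling driven by failover_pkts, failover_checkpt and the new LINK_FAILINGOVER flag. A compressed recap of the MTU handling as it now appears in those hunks (a restatement, not additional patch content):

/* link create:    l->advertised_mtu = bearer->mtu; l->mtu = l->advertised_mtu
 * link reset:     l->mtu = l->advertised_mtu       (renegotiate from scratch)
 * proto receive:  if (l->mtu > msg_max_pkt(msg)) l->mtu = msg_max_pkt(msg)
 * proto transmit: STATE_MSG advertises l->mtu;
 *                 RESET_MSG/ACTIVATE_MSG advertise l->advertised_mtu
 */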
index d2b5663643da5398abd1fc9f3ec447e66c74684a..b5b4e3554d4e896873eba6c58ccb3ec48f712025 100644 (file)
 
 /* Link endpoint execution states
  */
-#define LINK_STARTED    0x0001
-#define LINK_STOPPED    0x0002
-#define LINK_SYNCHING   0x0004
+#define LINK_STARTED     0x0001
+#define LINK_STOPPED     0x0002
+#define LINK_SYNCHING    0x0004
+#define LINK_FAILINGOVER 0x0008
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -122,9 +123,8 @@ struct tipc_stats {
  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
  * @exp_msg_count: # of tunnelled messages expected during link changeover
  * @reset_checkpoint: seq # of last acknowledged message at time of link reset
- * @max_pkt: current maximum packet size for this link
- * @max_pkt_target: desired maximum packet size for this link
- * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
+ * @mtu: current maximum packet size for this link
+ * @advertised_mtu: advertised own mtu when link is being established
  * @transmitq: queue for sent, non-acked messages
  * @backlogq: queue for messages waiting to be sent
  * @next_out_no: next sequence number to use for outbound messages
@@ -167,16 +167,16 @@ struct tipc_link {
        struct tipc_msg *pmsg;
        u32 priority;
        char net_plane;
+       u16 synch_point;
 
-       /* Changeover */
-       u32 exp_msg_count;
-       u32 reset_checkpoint;
-       u32 synch_point;
+       /* Failover */
+       u16 failover_pkts;
+       u16 failover_checkpt;
+       struct sk_buff *failover_skb;
 
        /* Max packet negotiation */
-       u32 max_pkt;
-       u32 max_pkt_target;
-       u32 max_pkt_probes;
+       u16 mtu;
+       u16 advertised_mtu;
 
        /* Sending */
        struct sk_buff_head transmq;
@@ -201,7 +201,6 @@ struct tipc_link {
        struct sk_buff_head wakeupq;
 
        /* Fragmentation/reassembly */
-       u32 long_msg_seq_no;
        struct sk_buff *reasm_buf;
 
        /* Statistics */
@@ -232,7 +231,7 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                     struct sk_buff_head *list);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
-                         u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
+                         u32 gap, u32 tolerance, u32 priority);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
index 3bb499c619181b7275e9839b4d718543f5a87e6a..c3e96e8154188af27c0d5fc545fe95c54a29e9e9 100644 (file)
@@ -355,7 +355,7 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
        start = align(bsz);
        pad = start - bsz;
 
-       if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+       if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
                return false;
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
                return false;
@@ -433,7 +433,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
 
        if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
-       if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+       if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
        if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
index d273207ede28abd1ed3a41b6f898cbf5064fe97a..e1d3595e2ee9577634b2bf5b215bd96f43ca473b 100644 (file)
@@ -72,7 +72,7 @@ struct plist;
 #define  MSG_BUNDLER          6
 #define  LINK_PROTOCOL        7
 #define  CONN_MANAGER         8
-#define  CHANGEOVER_PROTOCOL  10
+#define  TUNNEL_PROTOCOL      10
 #define  NAME_DISTRIBUTOR     11
 #define  MSG_FRAGMENTER       12
 #define  LINK_CONFIG          13
@@ -512,8 +512,8 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
 /*
  * Changeover tunnel message types
  */
-#define DUPLICATE_MSG          0
-#define ORIGINAL_MSG           1
+#define SYNCH_MSG              0
+#define FAILOVER_MSG           1
 
 /*
  * Config protocol message types
@@ -556,9 +556,9 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
 
 static inline bool msg_dup(struct tipc_msg *m)
 {
-       if (likely(msg_user(m) != CHANGEOVER_PROTOCOL))
+       if (likely(msg_user(m) != TUNNEL_PROTOCOL))
                return false;
-       if (msg_type(m) != DUPLICATE_MSG)
+       if (msg_type(m) != SYNCH_MSG)
                return false;
        return true;
 }
index 3e4f04897c03ea3d9e98d339aa0b5da72013b8bc..22c059ad29991abbdc40e3eca4a09de78df2c1d0 100644 (file)
@@ -254,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        active[0] = active[1] = l_ptr;
 exit:
        /* Leave room for changeover header when returning 'mtu' to users: */
-       n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
-       n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+       n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+       n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
 }
 
 /**
@@ -319,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 
        /* Leave room for changeover header when returning 'mtu' to users: */
        if (active[0]) {
-               n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
-               n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+               n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+               n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
                return;
        }
-
        /* Loopback link went down? No fragmentation needed from now on. */
        if (n_ptr->addr == tn->own_addr) {
                n_ptr->act_mtus[0] = MAX_MSG_SIZE;
@@ -394,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                n_ptr->bclink.recv_permitted = false;
        }
 
-       /* Abort link changeover */
+       /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link *l_ptr = n_ptr->links[i];
                if (!l_ptr)
                        continue;
-               l_ptr->reset_checkpoint = l_ptr->next_in_no;
-               l_ptr->exp_msg_count = 0;
+               l_ptr->flags &= ~LINK_FAILINGOVER;
+               l_ptr->failover_checkpt = 0;
+               l_ptr->failover_pkts = 0;
+               kfree_skb(l_ptr->failover_skb);
+               l_ptr->failover_skb = NULL;
                tipc_link_reset_fragments(l_ptr);
-
-               /* Link marked for deletion after failover? => do it now */
-               if (l_ptr->flags & LINK_STOPPED)
-                       tipc_link_delete(l_ptr);
        }
 
        n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
index ef3d7aa2854aabdf9b866ecd445f19c21435730e..66deebc66aa10820880bd51839bd2ae379eaeb86 100644 (file)
@@ -176,7 +176,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                        goto tx_error;
                }
                ttl = ip4_dst_hoplimit(&rt->dst);
-               err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr,
+               err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone,
+                                         src->ipv4.s_addr,
                                          dst->ipv4.s_addr, 0, ttl, 0,
                                          src->udp_port, dst->udp_port,
                                          false, true);
@@ -197,7 +198,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                if (err)
                        goto tx_error;
                ttl = ip6_dst_hoplimit(ndst);
-               err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6,
+               err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone,
+                                          ndst->dev, &src->ipv6,
                                           &dst->ipv6, 0, ttl, src->udp_port,
                                           dst->udp_port, false);
 #endif
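
Like the vxlan hunk earlier, the tipc/udp_media change threads the transmitting socket into the UDP tunnel transmit helpers. The helpers' prototypes are not part of this excerpt; inferred from the IPv4 call site alone (so the argument names are guesses), the signature now has roughly this shape:

int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                        __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                        __be16 src_port, __be16 dst_port,
                        bool xnet, bool nocheck);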
index 7c532856b39829f0cddf50baecc6d8c73c3422e7..fbcedbe33190346a40fc148369757a6ef64a2106 100644 (file)
@@ -19,7 +19,7 @@
 #include <net/dst.h>
 #include <net/xfrm.h>
 
-static int xfrm_output2(struct sk_buff *skb);
+static int xfrm_output2(struct sock *sk, struct sk_buff *skb);
 
 static int xfrm_skb_check_space(struct sk_buff *skb)
 {
@@ -130,7 +130,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
                        return dst_output(skb);
 
                err = nf_hook(skb_dst(skb)->ops->family,
-                             NF_INET_POST_ROUTING, skb,
+                             NF_INET_POST_ROUTING, skb->sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
@@ -144,12 +144,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(xfrm_output_resume);
 
-static int xfrm_output2(struct sk_buff *skb)
+static int xfrm_output2(struct sock *sk, struct sk_buff *skb)
 {
        return xfrm_output_resume(skb, 1);
 }
 
-static int xfrm_output_gso(struct sk_buff *skb)
+static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
 {
        struct sk_buff *segs;
 
@@ -165,7 +165,7 @@ static int xfrm_output_gso(struct sk_buff *skb)
                int err;
 
                segs->next = NULL;
-               err = xfrm_output2(segs);
+               err = xfrm_output2(sk, segs);
 
                if (unlikely(err)) {
                        kfree_skb_list(nskb);
@@ -178,13 +178,13 @@ static int xfrm_output_gso(struct sk_buff *skb)
        return 0;
 }
 
-int xfrm_output(struct sk_buff *skb)
+int xfrm_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
        int err;
 
        if (skb_is_gso(skb))
-               return xfrm_output_gso(skb);
+               return xfrm_output_gso(sk, skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                err = skb_checksum_help(skb);
@@ -195,7 +195,7 @@ int xfrm_output(struct sk_buff *skb)
                }
        }
 
-       return xfrm_output2(skb);
+       return xfrm_output2(sk, skb);
 }
 EXPORT_SYMBOL_GPL(xfrm_output);
 
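The xfrm changes above are part of a wider series that threads the originating socket through the transmit callbacks: nf_hook() now forwards skb->sk, and the okfn continuation it invokes takes (struct sock *, struct sk_buff *) rather than the sk_buff alone. A minimal sketch of a continuation written against the new shape (the function name is illustrative, not from the patch; sk may be NULL for forwarded or kernel-generated packets):

        static int example_output_finish(struct sock *sk, struct sk_buff *skb)
        {
                /* sk is carried for lower layers; the dst still drives TX */
                return dst_output(skb);
        }
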
index 7de2ed9ec46ddd003ef3db00554335a12f95cb89..2091664295bae1a3a4725287e5d0bf5cfb28357c 100644 (file)
@@ -2423,6 +2423,11 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        const struct xfrm_link *link;
        int type, err;
 
+#ifdef CONFIG_COMPAT
+       if (is_compat_task())
+               return -ENOTSUPP;
+#endif
+
        type = nlh->nlmsg_type;
        if (type > XFRM_MSG_MAX)
                return -EINVAL;
index b5b3600dcdf5d004e01c1a558011519adfc386ba..d24f51bca465798cfcff96307eaf4aa101ad5833 100644 (file)
@@ -17,6 +17,7 @@ sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
 always := $(hostprogs-y)
 always += sockex1_kern.o
 always += sockex2_kern.o
+always += tcbpf1_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
index ca0333146006af20352ec704d5049085db975206..72540ec1f003accf249b33e669b9c31a00c364e8 100644 (file)
@@ -37,4 +37,11 @@ struct bpf_map_def {
        unsigned int max_entries;
 };
 
+static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
+       (void *) BPF_FUNC_skb_store_bytes;
+static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+       (void *) BPF_FUNC_l3_csum_replace;
+static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+       (void *) BPF_FUNC_l4_csum_replace;
+
 #endif
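
The three new declarations follow the pattern this header already uses for its other helpers: a static function pointer whose value is the helper's numeric id from enum bpf_func_id, which the LLVM BPF backend lowers into a helper call instruction. For comparison, the existing map-lookup helper earlier in the same header is declared the same way (reproduced from memory, so treat it as a sketch):

        static void *(*bpf_map_lookup_elem)(void *map, void *key) =
                (void *) BPF_FUNC_map_lookup_elem;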
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
new file mode 100644 (file)
index 0000000..7cf3f42
--- /dev/null
@@ -0,0 +1,71 @@
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include "bpf_helpers.h"
+
+/* compiler workaround */
+#define _htonl __builtin_bswap32
+
+static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
+{
+       bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
+}
+
+/* use 1 below for ingress qdisc and 0 for egress */
+#if 0
+#undef ETH_HLEN
+#define ETH_HLEN 0
+#endif
+
+#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
+#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
+
+static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
+{
+       __u8 old_tos = load_byte(skb, TOS_OFF);
+
+       bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
+       bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
+}
+
+#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))
+#define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr))
+
+#define IS_PSEUDO 0x10
+
+static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
+{
+       __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
+
+       bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
+       bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
+       bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
+}
+
+#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
+static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
+{
+       __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
+
+       bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
+       bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
+}
+
+SEC("classifier")
+int bpf_prog1(struct __sk_buff *skb)
+{
+       __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
+       long *value;
+
+       if (proto == IPPROTO_TCP) {
+               set_ip_tos(skb, 8);
+               set_tcp_ip_src(skb, 0xA010101);
+               set_tcp_dest_port(skb, 5001);
+       }
+
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
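
One detail worth noting in the sample above is the flags argument of the checksum helpers. As assumed here, the low four bits encode the width of the field being replaced (2 or 4 bytes) and bit 4 (IS_PSEUDO) marks a field that is also part of the pseudo header, which is why the source-address rewrite passes IS_PSEUDO | sizeof(new_ip) to bpf_l4_csum_replace but only sizeof(new_ip) to bpf_l3_csum_replace:

        /* flags layout assumed by the sample:
         *   bits 0-3  size of the stored field (2 or 4 bytes)
         *   bit  4    field feeds the pseudo header (L4 checksums only)
         */
        bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
                            IS_PSEUDO | sizeof(new_ip));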
index edc66de39f2e2f665becf1028c9f1b85f4ea7af0..7e392edaab97bee0f66825b195562c3aa879c05a 100644 (file)
@@ -4852,21 +4852,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb,
 
 static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
-       return selinux_ip_forward(skb, in, PF_INET);
+       return selinux_ip_forward(skb, state->in, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
-       return selinux_ip_forward(skb, in, PF_INET6);
+       return selinux_ip_forward(skb, state->in, PF_INET6);
 }
 #endif /* IPV6 */
 
@@ -4914,9 +4910,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
 
 static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        return selinux_ip_output(skb, PF_INET);
 }
@@ -5091,21 +5085,17 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
 
 static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
-                                          const struct net_device *in,
-                                          const struct net_device *out,
-                                          int (*okfn)(struct sk_buff *))
+                                          const struct nf_hook_state *state)
 {
-       return selinux_ip_postroute(skb, out, PF_INET);
+       return selinux_ip_postroute(skb, state->out, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
-                                          const struct net_device *in,
-                                          const struct net_device *out,
-                                          int (*okfn)(struct sk_buff *))
+                                          const struct nf_hook_state *state)
 {
-       return selinux_ip_postroute(skb, out, PF_INET6);
+       return selinux_ip_postroute(skb, state->out, PF_INET6);
 }
 #endif /* IPV6 */
 
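Both the SELinux hooks above and the Smack hooks further down are converted to the consolidated netfilter hook signature: the separate in/out device pointers and the okfn continuation move into a single struct nf_hook_state that each hook receives. The layout assumed by these conversions is roughly the following (sketch; field order approximate):

        struct nf_hook_state {
                unsigned int hook;
                int thresh;
                u_int8_t pf;
                struct net_device *in;
                struct net_device *out;
                struct sock *sk;
                int (*okfn)(struct sock *, struct sk_buff *);
        };
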
index 2df7b900e25965828ed91e3bd376340a672d9c0f..4e21b72dd7093ff7ff0c116b44bb8d75ee2db3b5 100644 (file)
@@ -73,6 +73,9 @@ static struct nlmsg_perm nlmsg_route_perms[] =
        { RTM_NEWMDB,           NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
        { RTM_DELMDB,           NETLINK_ROUTE_SOCKET__NLMSG_WRITE  },
        { RTM_GETMDB,           NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+       { RTM_NEWNSID,          NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+       { RTM_DELNSID,          NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+       { RTM_GETNSID,          NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 };
 
 static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -100,6 +103,10 @@ static struct nlmsg_perm nlmsg_xfrm_perms[] =
        { XFRM_MSG_FLUSHPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
        { XFRM_MSG_NEWAE,       NETLINK_XFRM_SOCKET__NLMSG_WRITE },
        { XFRM_MSG_GETAE,       NETLINK_XFRM_SOCKET__NLMSG_READ  },
+       { XFRM_MSG_NEWSADINFO,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
+       { XFRM_MSG_GETSADINFO,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
+       { XFRM_MSG_NEWSPDINFO,  NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+       { XFRM_MSG_GETSPDINFO,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
 };
 
 static struct nlmsg_perm nlmsg_audit_perms[] =
index 1684bcc78b34e42395b1db335c9122c405987c23..5fde34326dcf28312ab4c36cb09623ecb811760b 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
                goto out;
 
        /* No partial writes. */
-       length = EINVAL;
+       length = -EINVAL;
        if (*ppos != 0)
                goto out;
 
index c952632afb0d4ac8e44e35209e56a9b30d083c51..a455cfc9ec1f614851aba10693800a6acd476049 100644 (file)
@@ -23,9 +23,7 @@
 
 static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        struct socket_smack *ssp;
        struct smack_known *skp;
@@ -42,9 +40,7 @@ static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
 
 static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        struct socket_smack *ssp;
        struct smack_known *skp;
index 4ca3d5d02436daf0ec7ec7274c675c4beae74423..a8a1e14272a1e574302da56ad0ad7540bf8fd60d 100644 (file)
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Sunrise Point */
        { PCI_DEVICE(0x8086, 0xa170),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
index 526398a4a4428da11d5792c9c7f2a299cac9bd43..74382137b9f5abcd67b9c4c44581c9a80f26d17d 100644 (file)
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
 {
        /* We currently only handle front, HP */
        static hda_nid_t pins[] = {
-               0x0f, 0x10, 0x14, 0x15, 0
+               0x0f, 0x10, 0x14, 0x15, 0x17, 0
        };
        hda_nid_t *p;
        for (p = pins; *p; p++)
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
index 4e511221a0c11cd53588fc2ae507848ab94339cb..0db571340edbd94a7042a63f517f410b42c127d7 100644 (file)
@@ -22,6 +22,14 @@ TARGETS += vm
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
+# Clear LDFLAGS and MAKEFLAGS if called from main
+# Makefile to avoid test build failures when test
+# Makefile doesn't have explicit build rules.
+ifeq (1,$(MAKELEVEL))
+undefine LDFLAGS
+override MAKEFLAGS =
+endif
+
 all:
        for TARGET in $(TARGETS); do \
                make -C $$TARGET; \
index a2214d9609bda59c8e48e841823da4f459081d52..cc6a25d95fbff532bf5b00b0c339bec91ddc5bcf 100644 (file)
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
        r = -ENOMEM;
-       kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+       kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
        if (!kvm->memslots)
                goto out_err_no_srcu;
 
@@ -522,7 +522,7 @@ out_err_no_srcu:
 out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
-       kfree(kvm->memslots);
+       kvfree(kvm->memslots);
        kvm_arch_free_vm(kvm);
        return ERR_PTR(r);
 }
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm)
        kvm_for_each_memslot(memslot, slots)
                kvm_free_physmem_slot(kvm, memslot, NULL);
 
-       kfree(kvm->memslots);
+       kvfree(kvm->memslots);
 }
 
 static void kvm_destroy_devices(struct kvm *kvm)
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        goto out_free;
        }
 
-       slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
-                       GFP_KERNEL);
+       slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
        if (!slots)
                goto out_free;
+       memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 
        if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
                slot = id_to_memslot(slots, mem->slot);
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        kvm_arch_commit_memory_region(kvm, mem, &old, change);
 
        kvm_free_physmem_slot(kvm, &old, &new);
-       kfree(old_memslots);
+       kvfree(old_memslots);
 
        /*
         * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        return 0;
 
 out_slots:
-       kfree(slots);
+       kvfree(slots);
 out_free:
        kvm_free_physmem_slot(kvm, &new, &old);
 out:
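
The memslot hunks above replace kzalloc()/kmemdup() with kvm's kzalloc-or-vzalloc helper so that the increasingly large struct kvm_memslots no longer requires physically contiguous pages, and free the result with kvfree(), which handles either allocator. A minimal sketch of the helper pair as assumed here (kvm_kvzalloc is kvm's existing internal helper; kvfree lives in mm/util.c):

        /* sketch: fall back to vmalloc-backed memory for large allocations */
        static void *kvm_kvzalloc(unsigned long size)
        {
                if (size > PAGE_SIZE)
                        return vzalloc(size);
                else
                        return kzalloc(size, GFP_KERNEL);
        }

        /* kvfree() then picks vfree() or kfree() based on the pointer */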