Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Oct 2012 20:38:27 +0000 (13:38 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Oct 2012 20:38:27 +0000 (13:38 -0700)
Pull networking changes from David Miller:

 1) GRE now works over ipv6, from Dmitry Kozlov.

 2) Make SCTP more network namespace aware, from Eric Biederman.

 3) TEAM driver now works with non-ethernet devices, from Jiri Pirko.

 4) Make openvswitch network namespace aware, from Pravin B Shelar.

 5) IPV6 NAT implementation, from Patrick McHardy.

 6) Server side support for TCP Fast Open, from Jerry Chu and others.

 7) Packet BPF filter supports MOD and XOR, from Eric Dumazet and Daniel
    Borkmann.

 8) Increase the loopback default MTU to 64K, from Eric Dumazet.

 9) Use a per-task rather than per-socket page fragment allocator for
    outgoing networking traffic.  This benefits processes that have very
    many mostly idle sockets, which is quite common.

    From Eric Dumazet.

10) Use up to 32K for page fragment allocations, with fallbacks to
    smaller sizes when higher order page allocations fail.  Benefits are
    a) less segments for driver to process b) less calls to page
    allocator c) less waste of space.

    From Eric Dumazet.

11) Allow GRO to be used on GRE tunnels, from Eric Dumazet.

12) VXLAN device driver, one way to handle VLAN issues such as the
    limitation of 4096 VLAN IDs yet still have some level of isolation.
    From Stephen Hemminger.

13) As usual there is a large boatload of driver changes, with the scale
    perhaps tilted towards the wireless side this time around.

Fix up various fairly trivial conflicts, mostly caused by the user
namespace changes.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1012 commits)
  hyperv: Add buffer for extended info after the RNDIS response message.
  hyperv: Report actual status in receive completion packet
  hyperv: Remove extra allocated space for recv_pkt_list elements
  hyperv: Fix page buffer handling in rndis_filter_send_request()
  hyperv: Fix the missing return value in rndis_filter_set_packet_filter()
  hyperv: Fix the max_xfer_size in RNDIS initialization
  vxlan: put UDP socket in correct namespace
  vxlan: Depend on CONFIG_INET
  sfc: Fix the reported priorities of different filter types
  sfc: Remove EFX_FILTER_FLAG_RX_OVERRIDE_IP
  sfc: Fix loopback self-test with separate_tx_channels=1
  sfc: Fix MCDI structure field lookup
  sfc: Add parentheses around use of bitfield macro arguments
  sfc: Fix null function pointer in efx_sriov_channel_type
  vxlan: virtual extensible lan
  igmp: export symbol ip_mc_leave_group
  netlink: add attributes to fdb interface
  tg3: unconditionally select HWMON support when tg3 is enabled.
  Revert "net: ti cpsw ethernet: allow reading phy interface mode from DT"
  gre: fix sparse warning
  ...

967 files changed:
Documentation/ABI/testing/sysfs-ptp
Documentation/devicetree/bindings/net/can/c_can.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/cpsw.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/davinci-mdio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt [new file with mode: 0644]
Documentation/filesystems/nfs/nfsroot.txt
Documentation/infiniband/ipoib.txt
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/stmmac.txt
Documentation/networking/vxlan.txt [new file with mode: 0644]
MAINTAINERS
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/mips/configs/ar7_defconfig
arch/mips/configs/bcm47xx_defconfig
arch/mips/configs/ip22_defconfig
arch/mips/configs/jazz_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/markeins_defconfig
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/configs/rm200_defconfig
arch/powerpc/configs/pmac32_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/sparc/net/bpf_jit_comp.c
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
arch/x86/net/bpf_jit_comp.c
crypto/crypto_user.c
drivers/Makefile
drivers/bcma/Kconfig
drivers/bcma/bcma_private.h
drivers/bcma/core.c
drivers/bcma/driver_chipcommon_nflash.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/driver_pci.c
drivers/bcma/driver_pci_host.c
drivers/bcma/host_pci.c
drivers/bcma/host_soc.c
drivers/bcma/main.c
drivers/bcma/sprom.c
drivers/bluetooth/bcm203x.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bpa10x.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btsdio.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_ll.c
drivers/bluetooth/hci_vhci.c
drivers/connector/connector.c
drivers/infiniband/core/netlink.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/ulp/ipoib/Makefile
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c [new file with mode: 0644]
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/isdn/gigaset/common.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_core.h
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic.h
drivers/net/ethernet/broadcom/cnic_defs.h
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/fsl_pq_mdio.h [deleted file]
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/xgmac_mdio.c [new file with mode: 0644]
drivers/net/ethernet/i825xx/Kconfig
drivers/net/ethernet/i825xx/znet.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mipsnet.c [deleted file]
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/Kconfig
drivers/net/ethernet/sfc/Makefile
drivers/net/ethernet/sfc/bitfield.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon_boards.c
drivers/net/ethernet/sfc/filter.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mtd.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/ptp.c [new file with mode: 0644]
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig [moved from drivers/ieee802154/Kconfig with 76% similarity]
drivers/net/ieee802154/Makefile [moved from drivers/ieee802154/Makefile with 74% similarity]
drivers/net/ieee802154/at86rf230.c [moved from drivers/ieee802154/at86rf230.c with 98% similarity]
drivers/net/ieee802154/fakehard.c [moved from drivers/ieee802154/fakehard.c with 99% similarity]
drivers/net/ieee802154/fakelb.c [moved from drivers/ieee802154/fakelb.c with 100% similarity]
drivers/net/ieee802154/mrf24j40.c [new file with mode: 0644]
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/dp83640.c
drivers/net/phy/lxt.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio-mux-mmioreg.c [new file with mode: 0644]
drivers/net/phy/phy.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/Kconfig
drivers/net/team/team.c
drivers/net/team/team_mode_broadcast.c
drivers/net/team/team_mode_roundrobin.c
drivers/net/usb/asix_devices.c
drivers/net/usb/catc.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/gl620a.c
drivers/net/usb/kaweth.c
drivers/net/usb/net1080.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/rtl8150.c
drivers/net/usb/sierra_net.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/smsc95xx.h
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c [new file with mode: 0644]
drivers/net/wimax/i2400m/driver.c
drivers/net/wireless/adm8211.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/cfg80211.h
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/antenna.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_mci.h
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/hif_usb.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/rc.h
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/wow.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/ath/carl9170/mac.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/rx.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/b43/Makefile
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_common.h
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/phy_n.h
drivers/net/wireless/b43/radio_2057.c [new file with mode: 0644]
drivers/net/wireless/b43/radio_2057.h [new file with mode: 0644]
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/brcm80211/include/brcmu_wifi.h
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw_wx.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965.h
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/commands.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/cmd.h
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/Makefile
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/uap_txrx.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/eeprom.h
drivers/net/wireless/p54/lmac.h
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54pci.h
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2400pci.h
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500pci.h
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2500usb.h
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800lib.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt61pci.h
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rt2x00/rt73usb.h
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/fw.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.h
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.h
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl12xx/wl12xx.h
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/wl18xx.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/conf.h
drivers/net/wireless/ti/wlcore/debug.h
drivers/net/wireless/ti/wlcore/debugfs.c
drivers/net/wireless/ti/wlcore/init.c
drivers/net/wireless/ti/wlcore/io.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/scan.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/testmode.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/wireless/wl3501_cs.c
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/nfcwilink.c
drivers/nfc/pn533.c
drivers/nfc/pn544.c [deleted file]
drivers/nfc/pn544_hci.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_ixp46x.c
drivers/ptp/ptp_pch.c
drivers/ptp/ptp_private.h
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/scsi_netlink.c
drivers/scsi/scsi_transport_iscsi.c
drivers/ssb/driver_mipscore.c
drivers/staging/gdm72xx/netlink_k.c
drivers/staging/winbond/wbusb.c
firmware/Makefile
firmware/cxgb3/t3fw-7.10.0.bin.ihex [deleted file]
fs/dlm/netlink.c
include/linux/Kbuild
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bcma/bcma_regs.h
include/linux/etherdevice.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/hash.h
include/linux/ieee80211.h
include/linux/if_arp.h
include/linux/if_link.h
include/linux/if_team.h
include/linux/if_tunnel.h
include/linux/if_vlan.h
include/linux/inetdevice.h
include/linux/ip6_tunnel.h
include/linux/ipv6.h
include/linux/jiffies.h
include/linux/mdio.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/netfilter/nf_conntrack_amanda.h
include/linux/netfilter/nf_conntrack_ftp.h
include/linux/netfilter/nf_conntrack_h323.h
include/linux/netfilter/nf_conntrack_irc.h
include/linux/netfilter/nf_conntrack_pptp.h
include/linux/netfilter/nf_conntrack_sip.h
include/linux/netfilter/nf_nat.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/netfilter/nfnetlink_queue.h
include/linux/netfilter/xt_time.h
include/linux/netfilter_ipv4.h
include/linux/netfilter_ipv6/Kbuild
include/linux/netfilter_ipv6/ip6t_NPT.h [new file with mode: 0644]
include/linux/netlink.h
include/linux/nfc.h
include/linux/nl80211.h
include/linux/of_mdio.h
include/linux/packet_diag.h [new file with mode: 0644]
include/linux/pps_kernel.h
include/linux/ptp_clock_kernel.h
include/linux/rfkill.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/snmp.h
include/linux/ssb/ssb_driver_chipcommon.h
include/linux/stmmac.h
include/linux/tcp.h
include/linux/tcp_metrics.h [new file with mode: 0644]
include/linux/tipc_config.h
include/net/addrconf.h
include/net/arp.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/bluetooth/smp.h
include/net/cfg80211.h
include/net/checksum.h
include/net/dst.h
include/net/genetlink.h
include/net/gro_cells.h [new file with mode: 0644]
include/net/ieee80211_radiotap.h
include/net/inet_ecn.h
include/net/inet_frag.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_tunnel.h
include/net/ip_vs.h
include/net/ipip.h
include/net/ipv6.h
include/net/llc.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netfilter/nf_nat_helper.h
include/net/netfilter/nf_nat_l3proto.h [new file with mode: 0644]
include/net/netfilter/nf_nat_l4proto.h [new file with mode: 0644]
include/net/netfilter/nf_nat_protocol.h [deleted file]
include/net/netfilter/nf_nat_rule.h [deleted file]
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/netns/packet.h
include/net/netns/sctp.h [new file with mode: 0644]
include/net/nfc/hci.h
include/net/nfc/llc.h [new file with mode: 0644]
include/net/nfc/nci.h
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/net/nfc/shdlc.h [deleted file]
include/net/request_sock.h
include/net/scm.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/xfrm.h
include/scsi/scsi_netlink.h
kernel/audit.c
kernel/exit.c
kernel/fork.c
kernel/taskstats.c
lib/kobject_uevent.c
lib/nlattr.c
net/8021q/vlan_core.c
net/Kconfig
net/atm/resources.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c
net/batman-adv/vis.c
net/batman-adv/vis.h
net/bluetooth/a2mp.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/sock.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hidp/sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp_timer.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/can/gw.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/dst.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/link_watch.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/netprio_cgroup.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/scm.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/utils.c
net/dcb/dcbnl.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_route.c
net/decnet/dn_table.c
net/decnet/netfilter/dn_rtmsg.c
net/ieee802154/6lowpan.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_vti.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/ipt_NETMAP.c [deleted file]
net/ipv4/netfilter/ipt_REDIRECT.c [deleted file]
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c [moved from net/ipv4/netfilter/nf_nat_standalone.c with 52% similarity]
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nf_nat_pptp.c
net/ipv4/netfilter/nf_nat_proto_gre.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/netfilter/nf_nat_rule.c [deleted file]
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp_diag.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c [new file with mode: 0644]
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/netfilter.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_MASQUERADE.c [new file with mode: 0644]
net/ipv6/netfilter/ip6t_NPT.c [new file with mode: 0644]
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c [new file with mode: 0644]
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nf_nat_proto_icmpv6.c [new file with mode: 0644]
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/irda/irnetlink.c
net/key/af_key.c
net/l2tp/Kconfig
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_netlink.c
net/llc/llc_station.c
net/llc/sysctl_net_llc.c
net/mac80211/aes_cmac.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.h
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rate.h
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/Kconfig
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_amanda.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_internals.h
net/netfilter/nf_nat_amanda.c [moved from net/ipv4/netfilter/nf_nat_amanda.c with 96% similarity]
net/netfilter/nf_nat_core.c [moved from net/ipv4/netfilter/nf_nat_core.c with 51% similarity]
net/netfilter/nf_nat_ftp.c [moved from net/ipv4/netfilter/nf_nat_ftp.c with 79% similarity]
net/netfilter/nf_nat_helper.c [moved from net/ipv4/netfilter/nf_nat_helper.c with 82% similarity]
net/netfilter/nf_nat_irc.c [moved from net/ipv4/netfilter/nf_nat_irc.c with 89% similarity]
net/netfilter/nf_nat_proto_common.c [moved from net/ipv4/netfilter/nf_nat_proto_common.c with 62% similarity]
net/netfilter/nf_nat_proto_dccp.c [moved from net/ipv4/netfilter/nf_nat_proto_dccp.c with 61% similarity]
net/netfilter/nf_nat_proto_sctp.c [moved from net/ipv4/netfilter/nf_nat_proto_sctp.c with 61% similarity]
net/netfilter/nf_nat_proto_tcp.c [moved from net/ipv4/netfilter/nf_nat_proto_tcp.c with 65% similarity]
net/netfilter/nf_nat_proto_udp.c [moved from net/ipv4/netfilter/nf_nat_proto_udp.c with 60% similarity]
net/netfilter/nf_nat_proto_udplite.c [moved from net/ipv4/netfilter/nf_nat_proto_udplite.c with 58% similarity]
net/netfilter/nf_nat_proto_unknown.c [moved from net/ipv4/netfilter/nf_nat_proto_unknown.c with 76% similarity]
net/netfilter/nf_nat_sip.c [moved from net/ipv4/netfilter/nf_nat_sip.c with 62% similarity]
net/netfilter/nf_nat_tftp.c [moved from net/ipv4/netfilter/nf_nat_tftp.c with 97% similarity]
net/netfilter/nf_queue.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_CT.c
net/netfilter/xt_NETMAP.c [new file with mode: 0644]
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_NOTRACK.c [deleted file]
net/netfilter/xt_REDIRECT.c [new file with mode: 0644]
net/netfilter/xt_nat.c [new file with mode: 0644]
net/netfilter/xt_osf.c
net/netfilter/xt_set.c
net/netfilter/xt_socket.c
net/netfilter/xt_time.c
net/netlabel/netlabel_cipso_v4.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/nfc/core.c
net/nfc/hci/Makefile
net/nfc/hci/command.c
net/nfc/hci/core.c
net/nfc/hci/hci.h
net/nfc/hci/hcp.c
net/nfc/hci/llc.c [new file with mode: 0644]
net/nfc/hci/llc.h [new file with mode: 0644]
net/nfc/hci/llc_nop.c [new file with mode: 0644]
net/nfc/hci/llc_shdlc.c [moved from net/nfc/hci/shdlc.c with 54% similarity]
net/nfc/llcp/commands.c
net/nfc/llcp/llcp.c
net/nfc/llcp/llcp.h
net/nfc/llcp/sock.c
net/nfc/nci/core.c
net/nfc/nci/ntf.c
net/nfc/nci/rsp.c
net/nfc/netlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/dp_notify.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/Kconfig
net/packet/Makefile
net/packet/af_packet.c
net/packet/diag.c [new file with mode: 0644]
net/packet/internal.h [new file with mode: 0644]
net/phonet/pn_netlink.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rfkill/core.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_drr.c
net/sched/sch_generic.c
net/sched/sch_qfq.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/bind_addr.c
net/sctp/chunk.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/objcnt.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/primitive.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/ulpqueue.c
net/socket.c
net/tipc/bearer.c
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/eth_media.c
net/tipc/handler.c
net/tipc/link.c
net/tipc/name_table.c
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/subscr.c
net/unix/af_unix.c
net/unix/diag.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/radiotap.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
net/wireless/wext-core.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
security/selinux/netlink.c

index d40d2b55050239cd2669095ab909f287208f8f24..05aeedf177946a9f9eb99a9f6b4b26a488f2f74a 100644 (file)
@@ -19,7 +19,11 @@ Date:                September 2010
 Contact:       Richard Cochran <richardcochran@gmail.com>
 Description:
                This file contains the name of the PTP hardware clock
-               as a human readable string.
+               as a human readable string. The purpose of this
+               attribute is to provide the user with a "friendly
+               name" and to help distinguish PHY based devices from
+               MAC based ones. The string does not necessarily have
+               to be any kind of unique id.
 
 What:          /sys/class/ptp/ptpN/max_adjustment
 Date:          September 2010
diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt
new file mode 100644 (file)
index 0000000..8f1ae81
--- /dev/null
@@ -0,0 +1,49 @@
+Bosch C_CAN/D_CAN controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible           : Should be "bosch,c_can" for C_CAN controllers and
+                         "bosch,d_can" for D_CAN controllers.
+- reg                  : physical base address and size of the C_CAN/D_CAN
+                         registers map
+- interrupts           : property with a value describing the interrupt
+                         number
+
+Optional properties:
+- ti,hwmods            : Must be "d_can<n>" or "c_can<n>", n being the
+                         instance number
+
+Note: "ti,hwmods" field is used to fetch the base address and irq
+resources from TI, omap hwmod data base during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Example:
+
+Step1: SoC common .dtsi file
+
+       dcan1: d_can@481d0000 {
+               compatible = "bosch,d_can";
+               reg = <0x481d0000 0x2000>;
+               interrupts = <55>;
+               interrupt-parent = <&intc>;
+               status = "disabled";
+       };
+
+(or)
+
+       dcan1: d_can@481d0000 {
+               compatible = "bosch,d_can";
+               ti,hwmods = "d_can1";
+               reg = <0x481d0000 0x2000>;
+               interrupts = <55>;
+               interrupt-parent = <&intc>;
+               status = "disabled";
+       };
+
+Step 2: board specific .dts file
+
+       &dcan1 {
+               status = "okay";
+       };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
new file mode 100644 (file)
index 0000000..dcaabe9
--- /dev/null
@@ -0,0 +1,109 @@
+TI SoC Ethernet Switch Controller Device Tree Bindings
+------------------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,cpsw"
+- reg                  : physical base address and size of the cpsw
+                         registers map
+- interrupts           : property with a value describing the interrupt
+                         number
+- interrupt-parent     : The parent interrupt controller
+- cpdma_channels       : Specifies number of channels in CPDMA
+- host_port_no         : Specifies host port shift
+- cpdma_reg_ofs                : Specifies CPDMA submodule register offset
+- cpdma_sram_ofs       : Specifies CPDMA SRAM offset
+- ale_reg_ofs          : Specifies ALE submodule register offset
+- ale_entries          : Specifies No of entries ALE can hold
+- host_port_reg_ofs    : Specifies host port register offset
+- hw_stats_reg_ofs     : Specifies hardware statistics register offset
+- bd_ram_ofs           : Specifies internal descriptor RAM offset
+- bd_ram_size          : Specifies internal descriptor RAM size
+- rx_descs             : Specifies number of Rx descriptors
+- mac_control          : Specifies Default MAC control register content
+                         for the specific platform
+- slaves               : Specifies number for slaves
+- slave_reg_ofs                : Specifies slave register offset
+- sliver_reg_ofs       : Specifies slave sliver register offset
+- phy_id               : Specifies slave phy id
+- mac-address          : Specifies slave MAC address
+
+Optional properties:
+- ti,hwmods            : Must be "cpgmac0"
+- no_bd_ram            : Must be 0 or 1
+
+Note: "ti,hwmods" field is used to fetch the base address and irq
+resources from TI, omap hwmod data base during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Examples:
+
+       mac: ethernet@4A100000 {
+               compatible = "ti,cpsw";
+               reg = <0x4A100000 0x1000>;
+               interrupts = <55 0x4>;
+               interrupt-parent = <&intc>;
+               cpdma_channels = <8>;
+               host_port_no = <0>;
+               cpdma_reg_ofs = <0x800>;
+               cpdma_sram_ofs = <0xa00>;
+               ale_reg_ofs = <0xd00>;
+               ale_entries = <1024>;
+               host_port_reg_ofs = <0x108>;
+               hw_stats_reg_ofs = <0x900>;
+               bd_ram_ofs = <0x2000>;
+               bd_ram_size = <0x2000>;
+               no_bd_ram = <0>;
+               rx_descs = <64>;
+               mac_control = <0x20>;
+               slaves = <2>;
+               cpsw_emac0: slave@0 {
+                       slave_reg_ofs = <0x208>;
+                       sliver_reg_ofs = <0xd80>;
+                       phy_id = "davinci_mdio.16:00";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+               cpsw_emac1: slave@1 {
+                       slave_reg_ofs = <0x308>;
+                       sliver_reg_ofs = <0xdc0>;
+                       phy_id = "davinci_mdio.16:01";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+       };
+
+(or)
+       mac: ethernet@4A100000 {
+               compatible = "ti,cpsw";
+               ti,hwmods = "cpgmac0";
+               cpdma_channels = <8>;
+               host_port_no = <0>;
+               cpdma_reg_ofs = <0x800>;
+               cpdma_sram_ofs = <0xa00>;
+               ale_reg_ofs = <0xd00>;
+               ale_entries = <1024>;
+               host_port_reg_ofs = <0x108>;
+               hw_stats_reg_ofs = <0x900>;
+               bd_ram_ofs = <0x2000>;
+               bd_ram_size = <0x2000>;
+               no_bd_ram = <0>;
+               rx_descs = <64>;
+               mac_control = <0x20>;
+               slaves = <2>;
+               cpsw_emac0: slave@0 {
+                       slave_reg_ofs = <0x208>;
+                       sliver_reg_ofs = <0xd80>;
+                       phy_id = "davinci_mdio.16:00";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+               cpsw_emac1: slave@1 {
+                       slave_reg_ofs = <0x308>;
+                       sliver_reg_ofs = <0xdc0>;
+                       phy_id = "davinci_mdio.16:01";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+       };
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
new file mode 100644 (file)
index 0000000..72efaaf
--- /dev/null
@@ -0,0 +1,33 @@
+TI SoC Davinci MDIO Controller Device Tree Bindings
+---------------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,davinci_mdio"
+- reg                  : physical base address and size of the davinci mdio
+                         registers map
+- bus_freq             : Mdio Bus frequency
+
+Optional properties:
+- ti,hwmods            : Must be "davinci_mdio"
+
+Note: "ti,hwmods" field is used to fetch the base address and irq
+resources from TI, omap hwmod data base during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Examples:
+
+       mdio: davinci_mdio@4A101000 {
+               compatible = "ti,cpsw";
+               reg = <0x4A101000 0x1000>;
+               bus_freq = <1000000>;
+       };
+
+(or)
+
+       mdio: davinci_mdio@4A101000 {
+               compatible = "ti,cpsw";
+               ti,hwmods = "davinci_mdio";
+               bus_freq = <1000000>;
+       };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
new file mode 100644 (file)
index 0000000..8516929
--- /dev/null
@@ -0,0 +1,75 @@
+Properties for an MDIO bus multiplexer controlled by a memory-mapped device
+
+This is a special case of a MDIO bus multiplexer.  A memory-mapped device,
+like an FPGA, is used to control which child bus is connected.  The mdio-mux
+node must be a child of the memory-mapped device.  The driver currently only
+supports devices with eight-bit registers.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : string, must contain "mdio-mux-mmioreg"
+
+- reg : integer, contains the offset of the register that controls the bus
+       multiplexer.  The size field in the 'reg' property is the size of
+       register, and must therefore be 1.
+
+- mux-mask : integer, contains an eight-bit mask that specifies which
+       bits in the register control the actual bus multiplexer.  The
+       'reg' property of each child mdio-mux node must be constrained by
+       this mask.
+
+Example:
+
+The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
+For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
+A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
+BRDCFG1 that control the actual mux.
+
+       /* The FPGA node */
+       fpga: board-control@3,0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
+               reg = <3 0 0x30>;
+               ranges = <0 3 0 0x30>;
+
+               mdio-mux-emi2 {
+                       compatible = "mdio-mux-mmioreg", "mdio-mux";
+                       mdio-parent-bus = <&xmdio0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <9 1>; // BRDCFG1
+                       mux-mask = <0x6>; // EMI2
+
+                       emi2_slot1: mdio@0 {    // Slot 1 XAUI (FM2)
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               phy_xgmii_slot1: ethernet-phy@0 {
+                                       compatible = "ethernet-phy-ieee802.3-c45";
+                                       reg = <4>;
+                               };
+                       };
+
+                       emi2_slot2: mdio@2 {    // Slot 2 XAUI (FM1)
+                               reg = <2>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               phy_xgmii_slot2: ethernet-phy@4 {
+                                       compatible = "ethernet-phy-ieee802.3-c45";
+                                       reg = <0>;
+                               };
+                       };
+               };
+       };
+
+       /* The parent MDIO bus. */
+       xmdio0: mdio@f1000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "fsl,fman-xmdio";
+               reg = <0xf1000 0x1000>;
+               interrupts = <100 1 0 0>;
+       };
index ffdd9d866ad76cb4bb21553e238a45c8fd3d3c7d..2d66ed688125f894bae223c7e50fa85327ade534 100644 (file)
@@ -78,7 +78,8 @@ nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
                        flags           = hard, nointr, noposix, cto, ac
 
 
-ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
+ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>:
+   <dns0-ip>:<dns1-ip>
 
   This parameter tells the kernel how to configure IP addresses of devices
   and also how to set up the IP routing table. It was originally called
@@ -158,6 +159,13 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
 
                 Default: any
 
+  <dns0-ip>    IP address of first nameserver.
+               Value gets exported by /proc/net/pnp which is often linked
+               on embedded systems by /etc/resolv.conf.
+
+  <dns1-ip>    IP address of second nameserver.
+               Same as above.
+
 
 nfsrootdebug
 
index 64eeb55d0c09d05f6ec5788a150ddcf2119fdfd8..f2cfe265e836e082727a4c5004d1fed264d5d526 100644 (file)
@@ -24,6 +24,9 @@ Partitions and P_Keys
   The P_Key for any interface is given by the "pkey" file, and the
   main interface for a subinterface is in "parent."
 
+  Child interface create/delete can also be done using IPoIB's
+  rtnl_link_ops, where children created either way behave the same.
+
 Datagram vs Connected modes
 
   The IPoIB driver supports two modes of operation: datagram and
index 8f3ae4a6147e2c114849296e37809317671db057..a173d2a879f5cf7619b221148edbbac4789bbe8a 100644 (file)
@@ -75,9 +75,10 @@ folder:
 
 There is a special folder for debugging information:
 
-#  ls /sys/kernel/debug/batman_adv/bat0/
-# bla_claim_table    log                socket             transtable_local
-# gateways           originators        transtable_global  vis_data
+# ls /sys/kernel/debug/batman_adv/bat0/
+# bla_backbone_table  log                 transtable_global
+# bla_claim_table     originators         transtable_local
+# gateways            socket              vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
index 6b1c7110534e31bb2e04acd1505a07f6dd4db481..10a015c384b844b946ae8d7b71b81abc7fd33c79 100644 (file)
@@ -752,12 +752,22 @@ xmit_hash_policy
                protocol information to generate the hash.
 
                Uses XOR of hardware MAC addresses and IP addresses to
-               generate the hash.  The formula is
+               generate the hash.  The IPv4 formula is
 
                (((source IP XOR dest IP) AND 0xffff) XOR
                        ( source MAC XOR destination MAC ))
                                modulo slave count
 
+               The IPv6 formula is
+
+               hash = (source ip quad 2 XOR dest IP quad 2) XOR
+                      (source ip quad 3 XOR dest IP quad 3) XOR
+                      (source ip quad 4 XOR dest IP quad 4)
+
+               (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+                       XOR (source MAC XOR destination MAC))
+                               modulo slave count
+
                This algorithm will place all traffic to a particular
                network peer on the same slave.  For non-IP traffic,
                the formula is the same as for the layer2 transmit
@@ -778,19 +788,29 @@ xmit_hash_policy
                slaves, although a single connection will not span
                multiple slaves.
 
-               The formula for unfragmented TCP and UDP packets is
+               The formula for unfragmented IPv4 TCP and UDP packets is
 
                ((source port XOR dest port) XOR
                         ((source IP XOR dest IP) AND 0xffff)
                                modulo slave count
 
-               For fragmented TCP or UDP packets and all other IP
-               protocol traffic, the source and destination port
+               The formula for unfragmented IPv6 TCP and UDP packets is
+
+               hash = (source port XOR dest port) XOR
+                      ((source ip quad 2 XOR dest IP quad 2) XOR
+                       (source ip quad 3 XOR dest IP quad 3) XOR
+                       (source ip quad 4 XOR dest IP quad 4))
+
+               ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+                       modulo slave count
+
+               For fragmented TCP or UDP packets and all other IPv4 and
+               IPv6 protocol traffic, the source and destination port
                information is omitted.  For non-IP traffic, the
                formula is the same as for the layer2 transmit hash
                policy.
 
-               This policy is intended to mimic the behavior of
+               The IPv4 policy is intended to mimic the behavior of
                certain switches, notably Cisco switches with PFC2 as
                well as some Foundry and IBM products.
 
index ca447b35b8333106cdd19649d943d80cdb12cc1e..c7fc10724948629c98100b19250f8ee07d9dcd73 100644 (file)
@@ -439,7 +439,9 @@ tcp_stdurg - BOOLEAN
 tcp_synack_retries - INTEGER
        Number of times SYNACKs for a passive TCP connection attempt will
        be retransmitted. Should not be higher than 255. Default value
-       is 5, which corresponds to ~180seconds.
+       is 5, which corresponds to 31seconds till the last retransmission
+       with the current initial RTO of 1second. With this the final timeout
+       for a passive TCP connection will happen after 63seconds.
 
 tcp_syncookies - BOOLEAN
        Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
@@ -465,20 +467,37 @@ tcp_syncookies - BOOLEAN
 tcp_fastopen - INTEGER
        Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
        in the opening SYN packet. To use this feature, the client application
-       must not use connect(). Instead, it should use sendmsg() or sendto()
-       with MSG_FASTOPEN flag which performs a TCP handshake automatically.
-
-       The values (bitmap) are:
-       1: Enables sending data in the opening SYN on the client
-       5: Enables sending data in the opening SYN on the client regardless
-          of cookie availability.
+       must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than
+       connect() to perform a TCP handshake automatically.
+
+       The values (bitmap) are
+       1: Enables sending data in the opening SYN on the client.
+       2: Enables TCP Fast Open on the server side, i.e., allowing data in
+          a SYN packet to be accepted and passed to the application before
+          3-way hand shake finishes.
+       4: Send data in the opening SYN regardless of cookie availability and
+          without a cookie option.
+       0x100: Accept SYN data w/o validating the cookie.
+       0x200: Accept data-in-SYN w/o any cookie option present.
+       0x400/0x800: Enable Fast Open on all listeners regardless of the
+          TCP_FASTOPEN socket option. The two different flags designate two
+          different ways of setting max_qlen without the TCP_FASTOPEN socket
+          option.
 
        Default: 0
 
+       Note that the client & server side Fast Open flags (1 and 2
+       respectively) must be also enabled before the rest of flags can take
+       effect.
+
+       See include/net/tcp.h and the code for more details.
+
 tcp_syn_retries - INTEGER
        Number of times initial SYNs for an active TCP connection attempt
        will be retransmitted. Should not be higher than 255. Default value
-       is 5, which corresponds to ~180seconds.
+       is 6, which corresponds to 63seconds till the last retransmission
+       with the current initial RTO of 1second. With this the final timeout
+       for an active TCP connection attempt will happen after 127seconds.
 
 tcp_timestamps - BOOLEAN
        Enable timestamps as defined in RFC1323.
index c676b9cedbd0d90a802ce82c09e49a219560570c..ef9ee71b4d7fcc71a38dc94454828e74a606cded 100644 (file)
@@ -173,7 +173,6 @@ Where:
 For MDIO bus The we have:
 
  struct stmmac_mdio_bus_data {
-       int bus_id;
        int (*phy_reset)(void *priv);
        unsigned int phy_mask;
        int *irqs;
@@ -181,7 +180,6 @@ For MDIO bus The we have:
  };
 
 Where:
- o bus_id: bus identifier;
  o phy_reset: hook to reset the phy device attached to the bus.
  o phy_mask: phy mask passed when register the MDIO bus within the driver.
  o irqs: list of IRQs, one per PHY.
@@ -230,9 +228,6 @@ there are two MAC cores: one MAC is for MDIO Bus/PHY emulation
 with fixed_link support.
 
 static struct stmmac_mdio_bus_data stmmac1_mdio_bus = {
-       .bus_id = 1,
-               |
-               |-> phy device on the bus_id 1
        .phy_reset = phy_reset;
                |
                |-> function to provide the phy_reset on this board
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
new file mode 100644 (file)
index 0000000..5b34b76
--- /dev/null
@@ -0,0 +1,47 @@
+Virtual eXtensible Local Area Networking documentation
+======================================================
+
+The VXLAN protocol is a tunnelling protocol that is designed to
+solve the problem of limited number of available VLAN's (4096).
+With VXLAN the identifier is expanded to 24 bits.
+
+It is a draft RFC standard, that is implemented by Cisco Nexus,
+Vmware and Brocade. The protocol runs over UDP using a single
+destination port (still not standardized by IANA).
+This document describes the Linux kernel tunnel device,
+there is also an implementation of VXLAN for Openvswitch.
+
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point
+to point. A VXLAN device can either dynamically learn the IP address
+of the other end, in a manner similar to a learning bridge, or the
+forwarding entries can be configured statically.
+
+The management of vxlan is done in a similar fashion to its
+two closest neighbors GRE and VLAN. Configuring VXLAN requires
+the version of iproute2 that matches the kernel release
+where VXLAN was first merged upstream.
+
+1. Create vxlan device
+  # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
+
+This creates a new device (vxlan0). The device uses
+the multicast group 239.1.1.1 over eth1 to handle packets where
+no entry is in the forwarding table.
+
+2. Delete vxlan device
+  # ip link delete vxlan0
+
+3. Show vxlan info
+  # ip -d show vxlan0
+
+It is possible to create, destroy and display the vxlan
+forwarding table using the new bridge command.
+
+1. Create forwarding table entry
+  # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
+
+2. Delete forwarding table entry
+  # bridge fdb delete 00:17:42:8a:b4:05
+
+3. Show forwarding table
+  # bridge fdb show dev vxlan0
index 8c22b7f6f41ab3bef0007731e277adf1be252aa9..78336396a432e0a076a9cda21a60cc483c33497e 100644 (file)
@@ -4806,6 +4806,7 @@ M:        Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 M:     Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 M:     Samuel Ortiz <sameo@linux.intel.com>
 L:     linux-wireless@vger.kernel.org
+L:     linux-nfc@lists.01.org (moderated for non-subscribers)
 S:     Maintained
 F:     net/nfc/
 F:     include/linux/nfc.h
index e93fdae10b2313d1034647e422411f8f7eb12e40..90d3109c82f402df0356d43be23d9d965af68630 100644 (file)
@@ -67,7 +67,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 66b26c1e848c5cc7a714c8a989088455e2cf8a64..8f4f657fdbc67987daf0bcba1948390e43f6c163 100644 (file)
@@ -67,7 +67,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 1513325159802ee618a518975cb74b895ab324e9..4571d33903fed1c1a1cc75f165e0c250e598183b 100644 (file)
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 67bb6fc117f4fbb01f839a9d0a3b4aa43dbe3d62..12f211733ba02e8f58aa121e777b4b99c7cd6631 100644 (file)
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 3e35ce5fa467cc1cbc34222df58a12f65c132601..215389a5407fa215af1501ed2ea7d9a4d95fca5a 100644 (file)
@@ -66,7 +66,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index ae81e2d190c325fcfaab20e49769d42ee9c86859..cb9dfb30b6747c1fb472c0292af103021f23f001 100644 (file)
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 55d394edf63341a73ca712384b9863edf3fa5d7c..8d5def4a31e026e657ea78429a3fa36ac3d4fe42 100644 (file)
@@ -80,7 +80,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index af773743ee11e0557174f2870dab1a456105c981..e2af46f530c1c589d94e429149a78bc934f7d9f3 100644 (file)
@@ -64,7 +64,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index cdb70d66e53569a40d8a03554acdc57f1b887e1b..7c9402b2097fcb89b81202970bb1bc88a6ef90d4 100644 (file)
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 46bed78d0656484bdd9e90916df875a4a6227f06..19d23db690a4789bcf3d9369850170bd6c9f40bd 100644 (file)
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 86f7772bafbedd99bcf7e06d13db7e138579628e..ca6c0b4cab7754be95b0ed9bf8564cd243287abb 100644 (file)
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 288261456e1fd5f2d1dfce5dc648f16100ff04a4..c80941c7759e2f7530c61b2b3a73545dea1892af 100644 (file)
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 6cd5a519ce5c26338b67a3f2c88238fd53aff5c6..80e012fa409c8da0ee7a7d65b541aa35e424abc6 100644 (file)
@@ -56,7 +56,6 @@ CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
index ad15fb10322b28f8ccf0fbc8d29427ef0606ab93..b6fde2bb51b635390c88db8a118430233710e4cf 100644 (file)
@@ -96,7 +96,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
index d1606569b0019d735e571f6652c52816ce34c6f7..936ec5a5ed8d66c4f9f67c522c5b07234276cfa9 100644 (file)
@@ -87,7 +87,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 92a60aecad5ceaac2450ca3692fa15419a26b0f0..0315ee37a20bc3e4c4c035185da68445f22dea94 100644 (file)
@@ -60,7 +60,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
index 5527abbb7dea56ea634a0ab1f32c628f373163e0..cd732e5b4fd5f856df3f46a0a2ee83d0d7491f66 100644 (file)
@@ -86,7 +86,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 9c9a123016c056eb535193be96fff1d8eee76e56..636f82b89fd30e97ed54baa75c3bca623c46745f 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
index 28c6b276c21624a4550c2411042065936464d19c..84624b17b76918f9e76b57bbe64ba6988ffd1adb 100644 (file)
@@ -108,7 +108,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 138f698d7c00048c4b172718891bac49ef778b61..44b473420d5198f86d5d77cdc99f7e50975fd990 100644 (file)
@@ -109,7 +109,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 2c0230e76d20562d8035d56c6f8749d062eef110..59d9d2fdcd48880a833d73027e5da1eb02f3830a 100644 (file)
@@ -68,7 +68,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
index f8b394a76ac3bc4788724cc4c7078d8854a71c1e..29767a8dfea5173a5cf852f005d20fa60b7998bd 100644 (file)
@@ -55,7 +55,6 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index db27c82e0542e44ebab171cee08701c0424fc68d..06b56245d78c090b0007668fff2b3446cb52ac0e 100644 (file)
@@ -92,7 +92,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
index 7bd1763877babeb272b7e3b6a962330de7f6d91e..f55c27609fc6a8feca61ed7f3a7fe1648a169841 100644 (file)
@@ -66,7 +66,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
index c47f2becfbc303c63a5e76b208ad4263a34b7563..be1cb6ea3a36191e6e34e7b9d0bd046028e32060 100644 (file)
@@ -167,7 +167,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index e9073e9501b37792e5e6941253b6c13f601102c3..28368701ef796ff5c4268398264cdfb504a0be84 100644 (file)
@@ -464,8 +464,12 @@ void bpf_jit_compile(struct sk_filter *fp)
                                emit_alu_K(OR, K);
                                break;
                        case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+                       case BPF_S_ALU_XOR_X:
                                emit_alu_X(XOR);
                                break;
+                       case BPF_S_ALU_XOR_K:   /* A ^= K */
+                               emit_alu_K(XOR, K);
+                               break;
                        case BPF_S_ALU_LSH_X:   /* A <<= X */
                                emit_alu_X(SLL);
                                break;
index 0270620a16926956b6b6a6e95422a439c8b09cb2..8c5eff6d6df5577ea987d01cbf79cde62cab3506 100644 (file)
@@ -134,7 +134,6 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
index c11de27a9bcb232061be13744514589282ef68ed..e7a3dfcbcda7094ef4c7fa818c650716d2103397 100644 (file)
@@ -132,7 +132,6 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
index 33643a8bcbbb0f1887dc2f815fe469aaf0e0507a..520d2bd0b9c583de16ce246ffbf938c5c9f48b33 100644 (file)
@@ -280,6 +280,31 @@ void bpf_jit_compile(struct sk_filter *fp)
                                }
                                EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
                                break;
+                       case BPF_S_ALU_MOD_X: /* A %= X; */
+                               seen |= SEEN_XREG;
+                               EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
+                               if (pc_ret0 > 0) {
+                                       /* addrs[pc_ret0 - 1] is start address of target
+                                        * (addrs[i] - 6) is the address following this jmp
+                                        * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
+                                        */
+                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+                                                               (addrs[i] - 6));
+                               } else {
+                                       EMIT_COND_JMP(X86_JNE, 2 + 5);
+                                       CLEAR_A();
+                                       EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
+                               }
+                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
+                               EMIT2(0xf7, 0xf3);      /* div %ebx */
+                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
+                               break;
+                       case BPF_S_ALU_MOD_K: /* A %= K; */
+                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
+                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+                               EMIT2(0xf7, 0xf1);      /* div %ecx */
+                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
+                               break;
                        case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
                                EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
                                EMIT(K, 4);
@@ -310,9 +335,18 @@ void bpf_jit_compile(struct sk_filter *fp)
                                        EMIT1_off32(0x0d, K);   /* or imm32,%eax */
                                break;
                        case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+                       case BPF_S_ALU_XOR_X:
                                seen |= SEEN_XREG;
                                EMIT2(0x31, 0xd8);              /* xor %ebx,%eax */
                                break;
+                       case BPF_S_ALU_XOR_K: /* A ^= K; */
+                               if (K == 0)
+                                       break;
+                               if (is_imm8(K))
+                                       EMIT3(0x83, 0xf0, K);   /* xor imm8,%eax */
+                               else
+                                       EMIT1_off32(0x35, K);   /* xor imm32,%eax */
+                               break;
                        case BPF_S_ALU_LSH_X: /* A <<= X; */
                                seen |= SEEN_XREG;
                                EMIT4(0x89, 0xd9, 0xd3, 0xe0);  /* mov %ebx,%ecx; shl %cl,%eax */
index ba2c611154af5e0df373a04b22eed782da0daac5..6bba414d0c619d1fba31c1136aea411a09a930c0 100644 (file)
@@ -166,7 +166,7 @@ static int crypto_report_alg(struct crypto_alg *alg,
        struct crypto_user_alg *ualg;
        int err = 0;
 
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, info->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
                        CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
        if (!nlh) {
                err = -EMSGSIZE;
@@ -216,7 +216,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
        if (err)
                return err;
 
-       return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).pid);
+       return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
 }
 
 static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
@@ -500,8 +500,7 @@ static int __init crypto_user_init(void)
                .input  = crypto_netlink_rcv,
        };
 
-       crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO,
-                                           THIS_MODULE, &cfg);
+       crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
        if (!crypto_nlsk)
                return -ENOMEM;
 
index acb48fa4531cd9e707643717b88bd37104d60a81..03da5b663aef8ea6c2991484b98cb315a16b87db 100644 (file)
@@ -123,7 +123,6 @@ obj-$(CONFIG_VHOST_NET)             += vhost/
 obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
-obj-y                          += ieee802154/
 #common clk code
 obj-y                          += clk/
 
index 06b3207adebdcec34d75a9bcc352343cad73fd8d..a533af218368ec7723754a3cf28d8d4cb5d4711a 100644 (file)
@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
 
 config BCMA_SFLASH
        bool
-       depends on BCMA_DRIVER_MIPS && BROKEN
+       depends on BCMA_DRIVER_MIPS
        default y
 
 config BCMA_NFLASH
        bool
-       depends on BCMA_DRIVER_MIPS && BROKEN
+       depends on BCMA_DRIVER_MIPS
        default y
 
 config BCMA_DRIVER_GMAC_CMN
index 3cf9cc923cd27fa5765e4c22f9b78ff813a8e355..169fc58427d3cefc343c2e1ac229a5c2c280655f 100644 (file)
@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_SFLASH
 /* driver_chipcommon_sflash.c */
 int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
 #else
 static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 #ifdef CONFIG_BCMA_NFLASH
 /* driver_chipcommon_nflash.c */
 int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
 #else
 static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
index 63c8b470536f7278c164f762c1033c0bd27ca9d9..03bbe104338ff70db5abb6fbace10b1595f438a0 100644 (file)
@@ -65,7 +65,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
        switch (clkmode) {
        case BCMA_CLKMODE_FAST:
                bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
-               udelay(64);
+               usleep_range(64, 300);
                for (i = 0; i < 1500; i++) {
                        if (bcma_read32(core, BCMA_CLKCTLST) &
                            BCMA_CLKCTLST_HAVEHT) {
index 574d62435bc2f01718d8bdb5113a3e95ec8e66ab..9042781edec340e932e39d2b75c7dba495764e42 100644 (file)
@@ -5,15 +5,37 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+struct platform_device bcma_nflash_dev = {
+       .name           = "bcma_nflash",
+       .num_resources  = 0,
+};
+
 /* Initialize NAND flash access */
 int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
-       bcma_err(cc->core->bus, "NAND flash support is broken\n");
+       struct bcma_bus *bus = cc->core->bus;
+
+       if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+           cc->core->id.rev != 0x38) {
+               bcma_err(bus, "NAND flash on unsupported board!\n");
+               return -ENOTSUPP;
+       }
+
+       if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+               bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+               return -ENODEV;
+       }
+
+       cc->nflash.present = true;
+
+       /* Prepare platform device, but don't register it yet. It's too early,
+        * malloc (required by device_private_init) is not available yet. */
+       bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
        return 0;
 }
index c9a4f46c5143e28309ca55b788471f1241c2afff..201faf106b3f4e342337e2219213a72c670ef447 100644 (file)
@@ -76,7 +76,10 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
        if (max_msk)
                bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
 
-       /* Add some delay; allow resources to come up and settle. */
+       /*
+        * Add some delay; allow resources to come up and settle.
+        * Delay is required for SoC (early init).
+        */
        mdelay(2);
 }
 
@@ -101,7 +104,7 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
        bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
 }
 
-void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
+static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
 
@@ -257,7 +260,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
 }
 
 /* query bus clock frequency for PMU-enabled chipcommon */
-u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
 
index 6e157a58a1d7f9dc68ea10251c4157a2c2f2c012..2c4eec2ca5a0784bd50d392a84edc74911a58916 100644 (file)
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+static struct resource bcma_sflash_resource = {
+       .name   = "bcma_sflash",
+       .start  = BCMA_SFLASH,
+       .end    = 0,
+       .flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+       .name           = "bcma_sflash",
+       .resource       = &bcma_sflash_resource,
+       .num_resources  = 1,
+};
+
+struct bcma_sflash_tbl_e {
+       char *name;
+       u32 id;
+       u32 blocksize;
+       u16 numblocks;
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+       { "", 0x14, 0x10000, 32, },
+       { 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+       { 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+       { 0 },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+       int i;
+       bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+                       BCMA_CC_FLASHCTL_START | opcode);
+       for (i = 0; i < 1000; i++) {
+               if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+                     BCMA_CC_FLASHCTL_BUSY))
+                       return;
+               cpu_relax();
+       }
+       bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
 /* Initialize serial flash access */
 int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
-       bcma_err(cc->core->bus, "Serial flash support is broken\n");
+       struct bcma_bus *bus = cc->core->bus;
+       struct bcma_sflash *sflash = &cc->sflash;
+       struct bcma_sflash_tbl_e *e;
+       u32 id, id2;
+
+       switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+       case BCMA_CC_FLASHT_STSER:
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+               bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+               id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+               bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+               id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+               switch (id) {
+               case 0xbf:
+                       for (e = bcma_sflash_sst_tbl; e->name; e++) {
+                               if (e->id == id2)
+                                       break;
+                       }
+                       break;
+               default:
+                       for (e = bcma_sflash_st_tbl; e->name; e++) {
+                               if (e->id == id)
+                                       break;
+                       }
+                       break;
+               }
+               if (!e->name) {
+                       bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+                       return -ENOTSUPP;
+               }
+
+               break;
+       case BCMA_CC_FLASHT_ATSER:
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+               id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+               for (e = bcma_sflash_at_tbl; e->name; e++) {
+                       if (e->id == id)
+                               break;
+               }
+               if (!e->name) {
+                       bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+                       return -ENOTSUPP;
+               }
+
+               break;
+       default:
+               bcma_err(bus, "Unsupported flash type\n");
+               return -ENOTSUPP;
+       }
+
+       sflash->window = BCMA_SFLASH;
+       sflash->blocksize = e->blocksize;
+       sflash->numblocks = e->numblocks;
+       sflash->size = sflash->blocksize * sflash->numblocks;
+       sflash->present = true;
+
+       bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+                 e->name, sflash->size / 1024, sflash->blocksize,
+                 sflash->numblocks);
+
+       /* Prepare platform device, but don't register it yet. It's too early,
+        * malloc (required by device_private_init) is not available yet. */
+       bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+                                         sflash->size;
+       bcma_sflash_dev.dev.platform_data = sflash;
+
        return 0;
 }
index c32ebd537abe3a3e5f8f5e777f119c8768c9ea38..c39ee6d458506d6bb9c44eae651742e692255207 100644 (file)
@@ -51,7 +51,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
        }
 }
 
@@ -92,7 +92,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
                        ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
                        break;
                }
-               msleep(1);
+               usleep_range(1000, 2000);
        }
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
        return ret;
@@ -132,7 +132,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
        }
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
 }
index cbae2c2313366c8792d3a08e5a963aa05a35655a..9baf886e82df39f710b897a0b864824fbedfebad 100644 (file)
@@ -425,9 +425,9 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
        pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
 
        /* Reset RC */
-       udelay(3000);
+       usleep_range(3000, 5000);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
-       udelay(1000);
+       usleep_range(1000, 2000);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
                        BCMA_CORE_PCI_CTL_RST_OE);
 
@@ -481,7 +481,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
         * before issuing configuration requests to PCI Express
         * devices.
         */
-       udelay(100000);
+       msleep(100);
 
        bcma_core_pci_enable_crs(pc);
 
@@ -501,7 +501,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
        set_io_port_base(pc_host->pci_controller.io_map_base);
        /* Give some time to the PCI controller to configure itself with the new
         * values. Not waiting at this point causes crashes of the machine. */
-       mdelay(10);
+       usleep_range(10000, 15000);
        register_pci_controller(&pc_host->pci_controller);
        return;
 }
index a6e5672c67e77f473a8685884ff4626791e99ab2..b6b4b5ebd4c2560255b3a53bbceae0a814723e96 100644 (file)
@@ -77,8 +77,8 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
 }
 
 #ifdef CONFIG_BCMA_BLOCKIO
-void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
-                             size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+                                    size_t count, u16 offset, u8 reg_width)
 {
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
@@ -100,8 +100,9 @@ void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
        }
 }
 
-void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
-                              size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_write(struct bcma_device *core,
+                                     const void *buffer, size_t count,
+                                     u16 offset, u8 reg_width)
 {
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
@@ -139,7 +140,7 @@ static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
        iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
 }
 
-const struct bcma_host_ops bcma_host_pci_ops = {
+static const struct bcma_host_ops bcma_host_pci_ops = {
        .read8          = bcma_host_pci_read8,
        .read16         = bcma_host_pci_read16,
        .read32         = bcma_host_pci_read32,
@@ -272,6 +273,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { 0, },
index 3c381fb8f9c4797c2a413246ed90ac26305582cd..3475e600011a5c5ce0ec6ffe2b41f4f68b04c0e7 100644 (file)
@@ -143,7 +143,7 @@ static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
        writel(value, core->io_wrap + offset);
 }
 
-const struct bcma_host_ops bcma_host_soc_ops = {
+static const struct bcma_host_ops bcma_host_soc_ops = {
        .read8          = bcma_host_soc_read8,
        .read16         = bcma_host_soc_read16,
        .read32         = bcma_host_soc_read32,
index 758af9ccdef0fa3b71b891bb1d75c3bbc90f6f05..432aeeedfd5e6992c28ea081e4185bf88ffeed8e 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
                dev_id++;
        }
 
+#ifdef CONFIG_BCMA_SFLASH
+       if (bus->drv_cc.sflash.present) {
+               err = platform_device_register(&bcma_sflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering serial flash\n");
+       }
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+       if (bus->drv_cc.nflash.present) {
+               err = platform_device_register(&bcma_nflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering NAND flash\n");
+       }
+#endif
+
        return 0;
 }
 
@@ -210,7 +227,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
 
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
+       struct bcma_device *cores[3];
+
+       cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+       cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
+       cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
+
        bcma_unregister_cores(bus);
+
+       kfree(cores[2]);
+       kfree(cores[1]);
+       kfree(cores[0]);
 }
 
 int __init bcma_bus_early_register(struct bcma_bus *bus,
index 9ea4627dc0c233a808f322816c2560fd9c145d3d..0d546b64be341239a5ee405970f166be47f83081 100644 (file)
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
                /* for these chips OTP is always available */
                present = true;
                break;
+       case BCMA_CHIP_ID_BCM43227:
        case BCMA_CHIP_ID_BCM43228:
+       case BCMA_CHIP_ID_BCM43428:
                present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
                break;
        default:
index 37ae175162f346a3316b4864b2f1eebefd9e9d71..364f82b34d036bca12cbf0d48ef16af9b8ad68d2 100644 (file)
@@ -177,7 +177,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data) {
                BT_ERR("Can't allocate memory for data structure");
                return -ENOMEM;
@@ -189,14 +189,12 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
        data->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!data->urb) {
                BT_ERR("Can't allocate URB");
-               kfree(data);
                return -ENOMEM;
        }
 
        if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
                BT_ERR("Mini driver request failed");
                usb_free_urb(data->urb);
-               kfree(data);
                return -EIO;
        }
 
@@ -209,7 +207,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                BT_ERR("Can't allocate memory for mini driver");
                release_firmware(firmware);
                usb_free_urb(data->urb);
-               kfree(data);
                return -ENOMEM;
        }
 
@@ -224,7 +221,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                BT_ERR("Firmware request failed");
                usb_free_urb(data->urb);
                kfree(data->buffer);
-               kfree(data);
                return -EIO;
        }
 
@@ -236,7 +232,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                release_firmware(firmware);
                usb_free_urb(data->urb);
                kfree(data->buffer);
-               kfree(data);
                return -ENOMEM;
        }
 
@@ -271,7 +266,6 @@ static void bcm203x_disconnect(struct usb_interface *intf)
        usb_free_urb(data->urb);
        kfree(data->fw_data);
        kfree(data->buffer);
-       kfree(data);
 }
 
 static struct usb_driver bcm203x_driver = {
index 32e825144fe9835bb30ef4693bf902d6a13fc87e..995aee9cba22a8d871289004a4cef5ec60cdeaa2 100644 (file)
@@ -653,7 +653,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        }
 
        /* Initialize control structure and load firmware */
-       data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
        if (!data) {
                BT_ERR("Can't allocate memory for control structure");
                goto done;
@@ -674,7 +674,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
                BT_ERR("Firmware request failed");
-               goto error;
+               goto done;
        }
 
        BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -690,7 +690,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        hdev = hci_alloc_dev();
        if (!hdev) {
                BT_ERR("Can't allocate HCI device");
-               goto error;
+               goto done;
        }
 
        data->hdev = hdev;
@@ -708,7 +708,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        if (hci_register_dev(hdev) < 0) {
                BT_ERR("Can't register HCI device");
                hci_free_dev(hdev);
-               goto error;
+               goto done;
        }
 
        usb_set_intfdata(intf, data);
@@ -718,9 +718,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 release:
        release_firmware(firmware);
 
-error:
-       kfree(data);
-
 done:
        return -EIO;
 }
@@ -741,7 +738,6 @@ static void bfusb_disconnect(struct usb_interface *intf)
 
        hci_unregister_dev(hdev);
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 static struct usb_driver bfusb_driver = {
index 66c3a6770c417a5dbb714a9c10ae029fc2675109..0d26851d6e495a8624e611394709a1bf1246ef90 100644 (file)
@@ -681,7 +681,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -849,7 +849,7 @@ static int bluecard_probe(struct pcmcia_device *link)
        bluecard_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -864,10 +864,7 @@ static int bluecard_probe(struct pcmcia_device *link)
 
 static void bluecard_detach(struct pcmcia_device *link)
 {
-       bluecard_info_t *info = link->priv;
-
        bluecard_release(link);
-       kfree(info);
 }
 
 
index 29caaed2d715bd6de4f8b355bc8bc11b9580a7b6..2fe4a8031348f0c8b05074eb9889a1d2e02a7a91 100644 (file)
@@ -443,7 +443,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -453,10 +453,8 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        init_usb_anchor(&data->rx_anchor);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
@@ -475,7 +473,6 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -500,7 +497,6 @@ static void bpa10x_disconnect(struct usb_interface *intf)
        hci_free_dev(data->hdev);
        kfree_skb(data->rx_skb[0]);
        kfree_skb(data->rx_skb[1]);
-       kfree(data);
 }
 
 static struct usb_driver bpa10x_driver = {
index 8925b6d672a6ef7c14dc89741e7662fc3e057ef4..7ffd3f407144dc05c2bb2a9d9b29848efeeba347 100644 (file)
@@ -638,7 +638,7 @@ static int bt3c_probe(struct pcmcia_device *link)
        bt3c_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -654,10 +654,7 @@ static int bt3c_probe(struct pcmcia_device *link)
 
 static void bt3c_detach(struct pcmcia_device *link)
 {
-       bt3c_info_t *info = link->priv;
-
        bt3c_release(link);
-       kfree(info);
 }
 
 static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
index 6a9e9717d3ab8327da49160823ef136ada65e053..3f4bfc814dc7d5a0382635dbe16a41af413e59fe 100644 (file)
@@ -600,8 +600,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
 exit:
        if (ret) {
                hdev->stat.err_rx++;
-               if (skb)
-                       kfree_skb(skb);
+               kfree_skb(skb);
        }
 
        return ret;
@@ -956,11 +955,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
        BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
                        id->vendor, id->device, id->class, func->num);
 
-       card = kzalloc(sizeof(*card), GFP_KERNEL);
-       if (!card) {
-               ret = -ENOMEM;
-               goto done;
-       }
+       card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
 
        card->func = func;
 
@@ -974,8 +971,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 
        if (btmrvl_sdio_register_dev(card) < 0) {
                BT_ERR("Failed to register BT device!");
-               ret = -ENODEV;
-               goto free_card;
+               return -ENODEV;
        }
 
        /* Disable the interrupts on the card */
@@ -1023,9 +1019,6 @@ disable_host_int:
        btmrvl_sdio_disable_host_int(card);
 unreg_dev:
        btmrvl_sdio_unregister_dev(card);
-free_card:
-       kfree(card);
-done:
        return ret;
 }
 
@@ -1047,7 +1040,6 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
                        BT_DBG("unregester dev");
                        btmrvl_sdio_unregister_dev(card);
                        btmrvl_remove_card(card->priv);
-                       kfree(card);
                }
        }
 }
index e10ea03470510f876bd3b09ef572630c4ef83e29..4a9909713874dd03eb52240b148ed83b9a22b9d8 100644 (file)
@@ -304,7 +304,7 @@ static int btsdio_probe(struct sdio_func *func,
                tuple = tuple->next;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -315,10 +315,8 @@ static int btsdio_probe(struct sdio_func *func,
        skb_queue_head_init(&data->txq);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_SDIO;
        hci_set_drvdata(hdev, data);
@@ -340,7 +338,6 @@ static int btsdio_probe(struct sdio_func *func,
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -366,7 +363,6 @@ static void btsdio_remove(struct sdio_func *func)
        hci_unregister_dev(hdev);
 
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 static struct sdio_driver btsdio_driver = {
index 21e803a6a281690af1d6598ba73d800fbf70c5dc..35a553a90616d85f07bf5c03d5d24e5373494ac6 100644 (file)
@@ -446,7 +446,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -567,7 +567,7 @@ static int btuart_probe(struct pcmcia_device *link)
        btuart_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -583,10 +583,7 @@ static int btuart_probe(struct pcmcia_device *link)
 
 static void btuart_detach(struct pcmcia_device *link)
 {
-       btuart_info_t *info = link->priv;
-
        btuart_release(link);
-       kfree(info);
 }
 
 static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
index 654e248763efb98024bd81b57118cd2ad1d77cdf..debda27df9b0452e3d7f3e5d2b06855c1b7af1f4 100644 (file)
@@ -96,11 +96,12 @@ static struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x0c10, 0x0000) },
 
        /* Broadcom BCM20702A0 */
+       { USB_DEVICE(0x04ca, 0x2003) },
        { USB_DEVICE(0x0489, 0xe042) },
        { USB_DEVICE(0x413c, 0x8197) },
 
        /* Foxconn - Hon Hai */
-       { USB_DEVICE(0x0489, 0xe033) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
 
        /*Broadcom devices with vendor specific id */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
@@ -956,7 +957,7 @@ static int btusb_probe(struct usb_interface *intf,
                        return -ENODEV;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -979,10 +980,8 @@ static int btusb_probe(struct usb_interface *intf,
                }
        }
 
-       if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
-               kfree(data);
+       if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
                return -ENODEV;
-       }
 
        data->cmdreq_type = USB_TYPE_CLASS;
 
@@ -1002,10 +1001,8 @@ static int btusb_probe(struct usb_interface *intf,
        init_usb_anchor(&data->deferred);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
@@ -1073,7 +1070,6 @@ static int btusb_probe(struct usb_interface *intf,
                                                        data->isoc, data);
                if (err < 0) {
                        hci_free_dev(hdev);
-                       kfree(data);
                        return err;
                }
        }
@@ -1081,7 +1077,6 @@ static int btusb_probe(struct usb_interface *intf,
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -1114,7 +1109,6 @@ static void btusb_disconnect(struct usb_interface *intf)
                usb_driver_release_interface(&btusb_driver, data->isoc);
 
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 #ifdef CONFIG_PM
index 88694697f34f68386cdb4273289ccea7baaeaa5b..60abf596f60ea21f9354ae1b3d3608bb04e4142e 100644 (file)
@@ -297,16 +297,14 @@ static int bt_ti_probe(struct platform_device *pdev)
        struct hci_dev *hdev;
        int err;
 
-       hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL);
+       hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
        if (!hst)
                return -ENOMEM;
 
        /* Expose "hciX" device to user space */
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(hst);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        BT_DBG("hdev %p", hdev);
 
@@ -321,7 +319,6 @@ static int bt_ti_probe(struct platform_device *pdev)
        err = hci_register_dev(hdev);
        if (err < 0) {
                BT_ERR("Can't register HCI device error %d", err);
-               kfree(hst);
                hci_free_dev(hdev);
                return err;
        }
@@ -347,7 +344,6 @@ static int bt_ti_remove(struct platform_device *pdev)
        hci_unregister_dev(hdev);
 
        hci_free_dev(hdev);
-       kfree(hst);
 
        dev_set_drvdata(&pdev->dev, NULL);
        return 0;
@@ -362,21 +358,7 @@ static struct platform_driver btwilink_driver = {
        },
 };
 
-/* ------- Module Init/Exit interfaces ------ */
-static int __init btwilink_init(void)
-{
-       BT_INFO("Bluetooth Driver for TI WiLink - Version %s", VERSION);
-
-       return platform_driver_register(&btwilink_driver);
-}
-
-static void __exit btwilink_exit(void)
-{
-       platform_driver_unregister(&btwilink_driver);
-}
-
-module_init(btwilink_init);
-module_exit(btwilink_exit);
+module_platform_driver(btwilink_driver);
 
 /* ------ Module Info ------ */
 
index 97a7784db4a2d4b6431aa2e15d8ff122b40ccdf4..036cb366fe6e77d7c8e202cf2aee0f0d3fde2880 100644 (file)
@@ -550,7 +550,7 @@ static int dtl1_probe(struct pcmcia_device *link)
        dtl1_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -569,7 +569,6 @@ static void dtl1_detach(struct pcmcia_device *link)
 
        dtl1_close(info);
        pcmcia_disable_device(link);
-       kfree(info);
 }
 
 static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
index 74e0966b3ead0bbcf3678ebea52ea6886a65baee..c8abce3d2d9c0618f2092c94398e3197d0396b27 100644 (file)
@@ -531,7 +531,7 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
        default:
                err = n_tty_ioctl_helper(tty, file, cmd, arg);
                break;
-       };
+       }
 
        return err;
 }
index ff6d589c34a5e900eff0eb498782f5597a6a8bee..cfc7679385890b6e0feaadee1462a1e85330cc59 100644 (file)
@@ -481,7 +481,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
                        hu->hdev->stat.err_rx++;
                        ptr++; count--;
                        continue;
-               };
+               }
 
                ptr++; count--;
 
index 3f72595a60178a7f23ca45ac6418c6e5cb53faa6..d8b7aed6e4a96f6d6997ab9fac2a1ac84fbc4700 100644 (file)
@@ -156,7 +156,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
        case HCI_SCODATA_PKT:
                data->hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        return total;
 }
index 82fa4f0f91d6ebdb13534997807b5da125e126fa..965b7811e04f37100151dc441b7713cdee065bbf 100644 (file)
@@ -264,8 +264,7 @@ static int __devinit cn_init(void)
                .input  = dev->input,
        };
 
-       dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
-                                        THIS_MODULE, &cfg);
+       dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
        if (!dev->nls)
                return -EIO;
 
index 3ae2bfd310158d58dd6051961358010947070fdd..fe10a949aef9b6bd0533da3c4886933ee4a2091f 100644 (file)
@@ -177,7 +177,7 @@ int __init ibnl_init(void)
                .input  = ibnl_rcv,
        };
 
-       nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
+       nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
        if (!nls) {
                pr_warn("Failed to create netlink socket\n");
                return -ENOMEM;
index 45aedf1d9338a2d9e274778fa025d496bb6f09fc..5213bab2d19bc2092c6f5cd948f5da8d5104736a 100644 (file)
@@ -1155,7 +1155,7 @@ static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
                 */
                if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
                    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
-                       writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db);
+                       writel(QID(qid) | PIDX(inc), qhp->wq.db);
                        break;
                }
                set_current_state(TASK_UNINTERRUPTIBLE);
index 3090100f0de7598fac29096f9b9e490397d60338..e5430dd50764c9c6e13b871a51ee8c273a3f9941 100644 (file)
@@ -5,7 +5,8 @@ ib_ipoib-y                                      := ipoib_main.o \
                                                   ipoib_multicast.o \
                                                   ipoib_verbs.o \
                                                   ipoib_vlan.o \
-                                                  ipoib_ethtool.o
+                                                  ipoib_ethtool.o \
+                                                  ipoib_netlink.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM)         += ipoib_cm.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG)      += ipoib_fs.o
 
index 0af216d21f8790c31af507022bbc722627f41671..196eb52f003519c15cafdf97dd0a08703e33111b 100644 (file)
@@ -104,6 +104,10 @@ enum {
 
        MAX_SEND_CQE              = 16,
        IPOIB_CM_COPYBREAK        = 256,
+
+       IPOIB_NON_CHILD           = 0,
+       IPOIB_LEGACY_CHILD        = 1,
+       IPOIB_RTNL_CHILD          = 2,
 };
 
 #define        IPOIB_OP_RECV   (1ul << 31)
@@ -353,6 +357,7 @@ struct ipoib_dev_priv {
        struct net_device *parent;
        struct list_head child_intfs;
        struct list_head list;
+       int    child_type;
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        struct ipoib_cm_dev_priv cm;
@@ -512,6 +517,17 @@ void ipoib_event(struct ib_event_handler *handler,
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+                    u16 pkey, int child_type);
+
+int  __init ipoib_netlink_init(void);
+void __exit ipoib_netlink_fini(void);
+
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
+int  ipoib_set_mode(struct net_device *dev, const char *buf);
+
+void ipoib_setup(struct net_device *dev);
+
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
index 24683fda8e21cdbaca2973cc69454013a2bbb995..175581cf478c2188c101dbb14a264b4e35c14c1b 100644 (file)
@@ -1448,15 +1448,10 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                return sprintf(buf, "datagram\n");
 }
 
-static ssize_t set_mode(struct device *d, struct device_attribute *attr,
-                       const char *buf, size_t count)
+int ipoib_set_mode(struct net_device *dev, const char *buf)
 {
-       struct net_device *dev = to_net_dev(d);
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-       if (!rtnl_trylock())
-               return restart_syscall();
-
        /* flush paths if we switch modes so that connections are restarted */
        if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
                set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
@@ -1467,7 +1462,8 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
                ipoib_flush_paths(dev);
-               return count;
+               rtnl_lock();
+               return 0;
        }
 
        if (!strcmp(buf, "datagram\n")) {
@@ -1476,14 +1472,32 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
                rtnl_unlock();
                ipoib_flush_paths(dev);
-
-               return count;
+               rtnl_lock();
+               return 0;
        }
-       rtnl_unlock();
 
        return -EINVAL;
 }
 
+static ssize_t set_mode(struct device *d, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct net_device *dev = to_net_dev(d);
+       int ret;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       ret = ipoib_set_mode(dev, buf);
+
+       rtnl_unlock();
+
+       if (!ret)
+               return count;
+
+       return ret;
+}
+
 static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
 
 int ipoib_cm_add_mode_attr(struct net_device *dev)
index 1e19b5ae7c479a5865837ffa4b08f7f5c89bdcdc..3f9a9ba2f9ecc3a6414c2978738e15fb703e9e06 100644 (file)
@@ -173,6 +173,11 @@ static int ipoib_stop(struct net_device *dev)
        return 0;
 }
 
+static void ipoib_uninit(struct net_device *dev)
+{
+       ipoib_dev_cleanup(dev);
+}
+
 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1257,6 +1262,9 @@ out:
 void ipoib_dev_cleanup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
+       LIST_HEAD(head);
+
+       ASSERT_RTNL();
 
        ipoib_delete_debug_files(dev);
 
@@ -1265,10 +1273,9 @@ void ipoib_dev_cleanup(struct net_device *dev)
                /* Stop GC on child */
                set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
                cancel_delayed_work(&cpriv->neigh_reap_task);
-               unregister_netdev(cpriv->dev);
-               ipoib_dev_cleanup(cpriv->dev);
-               free_netdev(cpriv->dev);
+               unregister_netdevice_queue(cpriv->dev, &head);
        }
+       unregister_netdevice_many(&head);
 
        ipoib_ib_dev_cleanup(dev);
 
@@ -1286,6 +1293,7 @@ static const struct header_ops ipoib_header_ops = {
 };
 
 static const struct net_device_ops ipoib_netdev_ops = {
+       .ndo_uninit              = ipoib_uninit,
        .ndo_open                = ipoib_open,
        .ndo_stop                = ipoib_stop,
        .ndo_change_mtu          = ipoib_change_mtu,
@@ -1295,7 +1303,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
 };
 
-static void ipoib_setup(struct net_device *dev)
+void ipoib_setup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -1373,12 +1381,9 @@ static ssize_t show_umcast(struct device *dev,
        return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
 }
 
-static ssize_t set_umcast(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf, size_t count)
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
 {
-       struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
-       unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
+       struct ipoib_dev_priv *priv = netdev_priv(ndev);
 
        if (umcast_val > 0) {
                set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
@@ -1386,6 +1391,15 @@ static ssize_t set_umcast(struct device *dev,
                                "by userspace\n");
        } else
                clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+}
+
+static ssize_t set_umcast(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
+
+       ipoib_set_umcast(to_net_dev(dev), umcast_val);
 
        return count;
 }
@@ -1657,7 +1671,6 @@ static void ipoib_remove_one(struct ib_device *device)
                flush_workqueue(ipoib_workqueue);
 
                unregister_netdev(priv->dev);
-               ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }
 
@@ -1709,8 +1722,15 @@ static int __init ipoib_init_module(void)
        if (ret)
                goto err_sa;
 
+       ret = ipoib_netlink_init();
+       if (ret)
+               goto err_client;
+
        return 0;
 
+err_client:
+       ib_unregister_client(&ipoib_client);
+
 err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);
@@ -1723,6 +1743,7 @@ err_fs:
 
 static void __exit ipoib_cleanup_module(void)
 {
+       ipoib_netlink_fini();
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
new file mode 100644 (file)
index 0000000..7468593
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. -  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <net/rtnetlink.h>
+#include "ipoib.h"
+
+static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+       [IFLA_IPOIB_PKEY]       = { .type = NLA_U16 },
+       [IFLA_IPOIB_MODE]       = { .type = NLA_U16 },
+       [IFLA_IPOIB_UMCAST]     = { .type = NLA_U16 },
+};
+
+static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       u16 val;
+
+       if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
+               goto nla_put_failure;
+
+       val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+       if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
+               goto nla_put_failure;
+
+       val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+       if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int ipoib_changelink(struct net_device *dev,
+                           struct nlattr *tb[], struct nlattr *data[])
+{
+       u16 mode, umcast;
+       int ret = 0;
+
+       if (data[IFLA_IPOIB_MODE]) {
+               mode  = nla_get_u16(data[IFLA_IPOIB_MODE]);
+               if (mode == IPOIB_MODE_DATAGRAM)
+                       ret = ipoib_set_mode(dev, "datagram\n");
+               else if (mode == IPOIB_MODE_CONNECTED)
+                       ret = ipoib_set_mode(dev, "connected\n");
+               else
+                       ret = -EINVAL;
+
+               if (ret < 0)
+                       goto out_err;
+       }
+
+       if (data[IFLA_IPOIB_UMCAST]) {
+               umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
+               ipoib_set_umcast(dev, umcast);
+       }
+
+out_err:
+       return ret;
+}
+
+static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+                              struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_device *pdev;
+       struct ipoib_dev_priv *ppriv;
+       u16 child_pkey;
+       int err;
+
+       if (!tb[IFLA_LINK])
+               return -EINVAL;
+
+       pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+       if (!pdev)
+               return -ENODEV;
+
+       ppriv = netdev_priv(pdev);
+
+       if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
+               ipoib_warn(ppriv, "child creation disallowed for child devices\n");
+               return -EINVAL;
+       }
+
+       if (!data || !data[IFLA_IPOIB_PKEY]) {
+               ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
+               child_pkey  = ppriv->pkey;
+       } else
+               child_pkey  = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+
+       err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
+
+       if (!err && data)
+               err = ipoib_changelink(dev, tb, data);
+       return err;
+}
+
+static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
+{
+       struct ipoib_dev_priv *priv, *ppriv;
+
+       priv = netdev_priv(dev);
+       ppriv = netdev_priv(priv->parent);
+
+       mutex_lock(&ppriv->vlan_mutex);
+       unregister_netdevice_queue(dev, head);
+       list_del(&priv->list);
+       mutex_unlock(&ppriv->vlan_mutex);
+}
+
+static size_t ipoib_get_size(const struct net_device *dev)
+{
+       return nla_total_size(2) +      /* IFLA_IPOIB_PKEY   */
+               nla_total_size(2) +     /* IFLA_IPOIB_MODE   */
+               nla_total_size(2);      /* IFLA_IPOIB_UMCAST */
+}
+
+static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+       .kind           = "ipoib",
+       .maxtype        = IFLA_IPOIB_MAX,
+       .policy         = ipoib_policy,
+       .priv_size      = sizeof(struct ipoib_dev_priv),
+       .setup          = ipoib_setup,
+       .newlink        = ipoib_new_child_link,
+       .changelink     = ipoib_changelink,
+       .dellink        = ipoib_unregister_child_dev,
+       .get_size       = ipoib_get_size,
+       .fill_info      = ipoib_fill_info,
+};
+
+int __init ipoib_netlink_init(void)
+{
+       return rtnl_link_register(&ipoib_link_ops);
+}
+
+void __exit ipoib_netlink_fini(void)
+{
+       rtnl_link_unregister(&ipoib_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("ipoib");
index d7e9740c724804afcdf20ba9c54fbe4fe91fdb87..8292554bccb5de2387d7e858eaaa1d27ddb6b8b4 100644 (file)
@@ -49,47 +49,11 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
 }
 static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
 
-int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+                    u16 pkey, int type)
 {
-       struct ipoib_dev_priv *ppriv, *priv;
-       char intf_name[IFNAMSIZ];
        int result;
 
-       if (!capable(CAP_NET_ADMIN))
-               return -EPERM;
-
-       ppriv = netdev_priv(pdev);
-
-       if (!rtnl_trylock())
-               return restart_syscall();
-       mutex_lock(&ppriv->vlan_mutex);
-
-       /*
-        * First ensure this isn't a duplicate. We check the parent device and
-        * then all of the child interfaces to make sure the Pkey doesn't match.
-        */
-       if (ppriv->pkey == pkey) {
-               result = -ENOTUNIQ;
-               priv = NULL;
-               goto err;
-       }
-
-       list_for_each_entry(priv, &ppriv->child_intfs, list) {
-               if (priv->pkey == pkey) {
-                       result = -ENOTUNIQ;
-                       priv = NULL;
-                       goto err;
-               }
-       }
-
-       snprintf(intf_name, sizeof intf_name, "%s.%04x",
-                ppriv->dev->name, pkey);
-       priv = ipoib_intf_alloc(intf_name);
-       if (!priv) {
-               result = -ENOMEM;
-               goto err;
-       }
-
        priv->max_ib_mtu = ppriv->max_ib_mtu;
        /* MTU will be reset when mcast join happens */
        priv->dev->mtu   = IPOIB_UD_MTU(priv->max_ib_mtu);
@@ -124,24 +88,27 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        ipoib_create_debug_files(priv->dev);
 
-       if (ipoib_cm_add_mode_attr(priv->dev))
-               goto sysfs_failed;
-       if (ipoib_add_pkey_attr(priv->dev))
-               goto sysfs_failed;
-       if (ipoib_add_umcast_attr(priv->dev))
-               goto sysfs_failed;
-
-       if (device_create_file(&priv->dev->dev, &dev_attr_parent))
-               goto sysfs_failed;
+       /* RTNL childs don't need proprietary sysfs entries */
+       if (type == IPOIB_LEGACY_CHILD) {
+               if (ipoib_cm_add_mode_attr(priv->dev))
+                       goto sysfs_failed;
+               if (ipoib_add_pkey_attr(priv->dev))
+                       goto sysfs_failed;
+               if (ipoib_add_umcast_attr(priv->dev))
+                       goto sysfs_failed;
+
+               if (device_create_file(&priv->dev->dev, &dev_attr_parent))
+                       goto sysfs_failed;
+       }
 
+       priv->child_type  = type;
+       priv->dev->iflink = ppriv->dev->ifindex;
        list_add_tail(&priv->list, &ppriv->child_intfs);
 
-       mutex_unlock(&ppriv->vlan_mutex);
-       rtnl_unlock();
-
        return 0;
 
 sysfs_failed:
+       result = -ENOMEM;
        ipoib_delete_debug_files(priv->dev);
        unregister_netdevice(priv->dev);
 
@@ -149,11 +116,60 @@ register_failed:
        ipoib_dev_cleanup(priv->dev);
 
 err:
+       return result;
+}
+
+int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+{
+       struct ipoib_dev_priv *ppriv, *priv;
+       char intf_name[IFNAMSIZ];
+       struct ipoib_dev_priv *tpriv;
+       int result;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       ppriv = netdev_priv(pdev);
+
+       snprintf(intf_name, sizeof intf_name, "%s.%04x",
+                ppriv->dev->name, pkey);
+       priv = ipoib_intf_alloc(intf_name);
+       if (!priv)
+               return -ENOMEM;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       mutex_lock(&ppriv->vlan_mutex);
+
+       /*
+        * First ensure this isn't a duplicate. We check the parent device and
+        * then all of the legacy child interfaces to make sure the Pkey
+        * doesn't match.
+        */
+       if (ppriv->pkey == pkey) {
+               result = -ENOTUNIQ;
+               goto out;
+       }
+
+       list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+               if (tpriv->pkey == pkey &&
+                   tpriv->child_type == IPOIB_LEGACY_CHILD) {
+                       result = -ENOTUNIQ;
+                       goto out;
+               }
+       }
+
+       result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+
+out:
        mutex_unlock(&ppriv->vlan_mutex);
-       rtnl_unlock();
-       if (priv)
+
+       if (result)
                free_netdev(priv->dev);
 
+       rtnl_unlock();
+
        return result;
 }
 
@@ -171,9 +187,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
                return restart_syscall();
        mutex_lock(&ppriv->vlan_mutex);
        list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
-               if (priv->pkey == pkey) {
+               if (priv->pkey == pkey &&
+                   priv->child_type == IPOIB_LEGACY_CHILD) {
                        unregister_netdevice(priv->dev);
-                       ipoib_dev_cleanup(priv->dev);
                        list_del(&priv->list);
                        dev = priv->dev;
                        break;
index aa41485bc594beb300bf5ee96f07cfcd331bb83e..30a6b174fbb08ea6f55aa5baada5b23f0ec01ac4 100644 (file)
@@ -1123,7 +1123,6 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
        return drv;
 
 error:
-       kfree(drv->cs);
        kfree(drv);
        return NULL;
 }
index 0c2bd806950e16313605ee62151e25e2c4a3730b..6a70184c3f237d832a265afbef7779e659686df2 100644 (file)
@@ -107,8 +107,6 @@ config MII
          or internal device.  It is safe to say Y or M here even if your
          ethernet card lacks MII.
 
-source "drivers/ieee802154/Kconfig"
-
 config IFB
        tristate "Intermediate Functional Block support"
        depends on NET_CLS_ACT
@@ -151,6 +149,19 @@ config MACVTAP
          To compile this driver as a module, choose M here: the module
          will be called macvtap.
 
+config VXLAN
+       tristate "Virtual eXtensible Local Area Network (VXLAN)"
+       depends on EXPERIMENTAL && INET
+       ---help---
+         This allows one to create vxlan virtual interfaces that provide
+         Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+         to tunnel virtual network infrastructure in virtualized environments.
+         For more information see:
+           http://tools.ietf.org/html/draft-mahalingam-dutt-dcops-vxlan-02
+
+         To compile this driver as a module, choose M here: the module
+         will be called vxlan.
+
 config NETCONSOLE
        tristate "Network console logging support"
        ---help---
@@ -290,6 +301,8 @@ source "drivers/net/wimax/Kconfig"
 
 source "drivers/net/wan/Kconfig"
 
+source "drivers/net/ieee802154/Kconfig"
+
 config XEN_NETDEV_FRONTEND
        tristate "Xen network device frontend driver"
        depends on XEN
index 3d375ca128a6d1a657782a467db56e9116587f74..335db78fd987a289b4db696c60a89f682be5c5ed 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VXLAN) += vxlan.o
 
 #
 # Networking Drivers
@@ -53,6 +54,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_IEEE802154) += ieee802154/
 
 obj-$(CONFIG_VMXNET3) += vmxnet3/
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
index d688a8af432c52a8ea3dc5e45607c2801e9fb831..7858c58df4a3a12c47de7c1a015b613781352c19 100644 (file)
@@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        write_unlock_bh(&bond->curr_slave_lock);
                        read_unlock(&bond->lock);
 
-                       netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
+                       call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
                        if (should_notify_peers)
-                               netdev_bonding_change(bond->dev,
-                                                     NETDEV_NOTIFY_PEERS);
+                               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+                                                        bond->dev);
 
                        read_lock(&bond->lock);
                        write_lock_bh(&bond->curr_slave_lock);
@@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                 bond_dev->name,
                                 bond_dev->type, slave_dev->type);
 
-                       res = netdev_bonding_change(bond_dev,
-                                                   NETDEV_PRE_TYPE_CHANGE);
+                       res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
+                                                      bond_dev);
                        res = notifier_to_errno(res);
                        if (res) {
                                pr_err("%s: refused to change device type\n",
@@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                        }
 
-                       netdev_bonding_change(bond_dev,
-                                             NETDEV_POST_TYPE_CHANGE);
+                       call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+                                                bond_dev);
                }
        } else if (bond_dev->type != slave_dev->type) {
                pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        block_netpoll_tx();
-       netdev_bonding_change(bond_dev, NETDEV_RELEASE);
+       call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
        write_lock_bh(&bond->lock);
 
        slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2584,7 +2584,7 @@ re_arm:
                        read_unlock(&bond->lock);
                        return;
                }
-               netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
                rtnl_unlock();
        }
 }
@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
        int do_failover = 0;
-       int delta_in_ticks;
+       int delta_in_ticks, extra_ticks;
        int i;
 
        read_lock(&bond->lock);
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+       extra_ticks = delta_in_ticks / 2;
 
        if (bond->slave_cnt == 0)
                goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                if (slave->link != BOND_LINK_UP) {
                        if (time_in_range(jiffies,
                                trans_start - delta_in_ticks,
-                               trans_start + delta_in_ticks) &&
+                               trans_start + delta_in_ticks + extra_ticks) &&
                            time_in_range(jiffies,
                                slave->dev->last_rx - delta_in_ticks,
-                               slave->dev->last_rx + delta_in_ticks)) {
+                               slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
 
                                slave->link  = BOND_LINK_UP;
                                bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                         */
                        if (!time_in_range(jiffies,
                                trans_start - delta_in_ticks,
-                               trans_start + 2 * delta_in_ticks) ||
+                               trans_start + 2 * delta_in_ticks + extra_ticks) ||
                            !time_in_range(jiffies,
                                slave->dev->last_rx - delta_in_ticks,
-                               slave->dev->last_rx + 2 * delta_in_ticks)) {
+                               slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
        struct slave *slave;
        int i, commit = 0;
        unsigned long trans_start;
+       int extra_ticks;
+
+       /* All the time comparisons below need some extra time. Otherwise, on
+        * fast networks the ARP probe/reply may arrive within the same jiffy
+        * as it was sent.  Then, the next time the ARP monitor is run, one
+        * arp_interval will already have passed in the comparisons.
+        */
+       extra_ticks = delta_in_ticks / 2;
 
        bond_for_each_slave(bond, slave, i) {
                slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                if (slave->link != BOND_LINK_UP) {
                        if (time_in_range(jiffies,
                                slave_last_rx(bond, slave) - delta_in_ticks,
-                               slave_last_rx(bond, slave) + delta_in_ticks)) {
+                               slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
 
                                slave->new_link = BOND_LINK_UP;
                                commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                if (time_in_range(jiffies,
                                  slave->jiffies - delta_in_ticks,
-                                 slave->jiffies + 2 * delta_in_ticks))
+                                 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
                        continue;
 
                /*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                    !bond->current_arp_slave &&
                    !time_in_range(jiffies,
                        slave_last_rx(bond, slave) - delta_in_ticks,
-                       slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+                       slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
 
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                if (bond_is_active_slave(slave) &&
                    (!time_in_range(jiffies,
                        trans_start - delta_in_ticks,
-                       trans_start + 2 * delta_in_ticks) ||
+                       trans_start + 2 * delta_in_ticks + extra_ticks) ||
                     !time_in_range(jiffies,
                        slave_last_rx(bond, slave) - delta_in_ticks,
-                       slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+                       slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
 
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                        if ((!bond->curr_active_slave &&
                             time_in_range(jiffies,
                                           trans_start - delta_in_ticks,
-                                          trans_start + delta_in_ticks)) ||
+                                          trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
                                if (bond->current_arp_slave) {
@@ -3203,7 +3212,7 @@ re_arm:
                        read_unlock(&bond->lock);
                        return;
                }
-               netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
                rtnl_unlock();
        }
 }
@@ -3351,57 +3360,94 @@ static struct notifier_block bond_netdev_notifier = {
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
+/*
+ * Hash for the output device based upon layer 2 data
+ */
+static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+{
+       struct ethhdr *data = (struct ethhdr *)skb->data;
+
+       if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
+               return (data->h_dest[5] ^ data->h_source[5]) % count;
+
+       return 0;
+}
+
 /*
  * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP mimic bond_xmit_hash_policy_l2()
+ * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = ip_hdr(skb);
-
-       if (skb->protocol == htons(ETH_P_IP)) {
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       u32 v6hash;
+       __be32 *s, *d;
+
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb_network_header_len(skb) >= sizeof(*iph)) {
+               iph = ip_hdr(skb);
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
+       } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+               ipv6h = ipv6_hdr(skb);
+               s = &ipv6h->saddr.s6_addr32[0];
+               d = &ipv6h->daddr.s6_addr32[0];
+               v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+               v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
+               return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
        }
 
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
+       return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*
  * Hash for the output device based upon layer 3 and layer 4 data. If
  * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, mimic bond_xmit_hash_policy_l2()
+ * altogether not IP, fall back on bond_xmit_hash_policy_l2()
  */
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = ip_hdr(skb);
-       __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-       int layer4_xor = 0;
-
-       if (skb->protocol == htons(ETH_P_IP)) {
+       u32 layer4_xor = 0;
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       __be32 *s, *d;
+       __be16 *layer4hdr;
+
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb_network_header_len(skb) >= sizeof(*iph)) {
+               iph = ip_hdr(skb);
                if (!ip_is_fragment(iph) &&
                    (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP)) {
-                       layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
+                    iph->protocol == IPPROTO_UDP) &&
+                   (skb_headlen(skb) - skb_network_offset(skb) >=
+                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
+                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
+                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-
+       } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+               ipv6h = ipv6_hdr(skb);
+               if ((ipv6h->nexthdr == IPPROTO_TCP ||
+                    ipv6h->nexthdr == IPPROTO_UDP) &&
+                   (skb_headlen(skb) - skb_network_offset(skb) >=
+                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
+                       layer4hdr = (__be16 *)(ipv6h + 1);
+                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               }
+               s = &ipv6h->saddr.s6_addr32[0];
+               d = &ipv6h->daddr.s6_addr32[0];
+               layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+               layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
+                              (layer4_xor >> 8);
+               return layer4_xor % count;
        }
 
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
-}
-
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
-{
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
+       return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*-------------------------- Device entry points ----------------------------*/
index 4c538e3886553f678729ebe2aa42affd021cc755..e5180dfddba54dc6918b8dbb3e2c8643316e130b 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/if_ether.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/can.h>
 #include <linux/can/dev.h>
@@ -45,6 +46,9 @@
 #define IF_ENUM_REG_LEN                11
 #define C_CAN_IFACE(reg, iface)        (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
 
+/* control extension register D_CAN specific */
+#define CONTROL_EX_PDR         BIT(8)
+
 /* control register */
 #define CONTROL_TEST           BIT(7)
 #define CONTROL_CCE            BIT(6)
@@ -64,6 +68,7 @@
 #define TEST_BASIC             BIT(2)
 
 /* status register */
+#define STATUS_PDA             BIT(10)
 #define STATUS_BOFF            BIT(7)
 #define STATUS_EWARN           BIT(6)
 #define STATUS_EPASS           BIT(5)
 /* minimum timeout for checking BUSY status */
 #define MIN_TIMEOUT_VALUE      6
 
+/* Wait for ~1 sec for INIT bit */
+#define INIT_WAIT_MS           1000
+
 /* napi related */
 #define C_CAN_NAPI_WEIGHT      C_CAN_MSG_OBJ_RX_NUM
 
@@ -201,6 +209,30 @@ static const struct can_bittiming_const c_can_bittiming_const = {
        .brp_inc = 1,
 };
 
+static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_enable(priv->device);
+}
+
+static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_disable(priv->device);
+}
+
+static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_get_sync(priv->device);
+}
+
+static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_put_sync(priv->device);
+}
+
 static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
 {
        return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -673,11 +705,15 @@ static int c_can_get_berr_counter(const struct net_device *dev,
        unsigned int reg_err_counter;
        struct c_can_priv *priv = netdev_priv(dev);
 
+       c_can_pm_runtime_get_sync(priv);
+
        reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
        bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
                                ERR_CNT_REC_SHIFT;
        bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
 
+       c_can_pm_runtime_put_sync(priv);
+
        return 0;
 }
 
@@ -1053,11 +1089,13 @@ static int c_can_open(struct net_device *dev)
        int err;
        struct c_can_priv *priv = netdev_priv(dev);
 
+       c_can_pm_runtime_get_sync(priv);
+
        /* open the can device */
        err = open_candev(dev);
        if (err) {
                netdev_err(dev, "failed to open can device\n");
-               return err;
+               goto exit_open_fail;
        }
 
        /* register interrupt handler */
@@ -1079,6 +1117,8 @@ static int c_can_open(struct net_device *dev)
 
 exit_irq_fail:
        close_candev(dev);
+exit_open_fail:
+       c_can_pm_runtime_put_sync(priv);
        return err;
 }
 
@@ -1091,6 +1131,7 @@ static int c_can_close(struct net_device *dev)
        c_can_stop(dev);
        free_irq(dev->irq, dev);
        close_candev(dev);
+       c_can_pm_runtime_put_sync(priv);
 
        return 0;
 }
@@ -1119,6 +1160,77 @@ struct net_device *alloc_c_can_dev(void)
 }
 EXPORT_SYMBOL_GPL(alloc_c_can_dev);
 
+#ifdef CONFIG_PM
+int c_can_power_down(struct net_device *dev)
+{
+       u32 val;
+       unsigned long time_out;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (!(dev->flags & IFF_UP))
+               return 0;
+
+       WARN_ON(priv->type != BOSCH_D_CAN);
+
+       /* set PDR value so the device goes to power down mode */
+       val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
+       val |= CONTROL_EX_PDR;
+       priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
+
+       /* Wait for the PDA bit to get set */
+       time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
+       while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
+                               time_after(time_out, jiffies))
+               cpu_relax();
+
+       if (time_after(jiffies, time_out))
+               return -ETIMEDOUT;
+
+       c_can_stop(dev);
+
+       c_can_pm_runtime_put_sync(priv);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(c_can_power_down);
+
+int c_can_power_up(struct net_device *dev)
+{
+       u32 val;
+       unsigned long time_out;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (!(dev->flags & IFF_UP))
+               return 0;
+
+       WARN_ON(priv->type != BOSCH_D_CAN);
+
+       c_can_pm_runtime_get_sync(priv);
+
+       /* Clear PDR and INIT bits */
+       val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
+       val &= ~CONTROL_EX_PDR;
+       priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
+       val = priv->read_reg(priv, C_CAN_CTRL_REG);
+       val &= ~CONTROL_INIT;
+       priv->write_reg(priv, C_CAN_CTRL_REG, val);
+
+       /* Wait for the PDA bit to get clear */
+       time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
+       while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
+                               time_after(time_out, jiffies))
+               cpu_relax();
+
+       if (time_after(jiffies, time_out))
+               return -ETIMEDOUT;
+
+       c_can_start(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(c_can_power_up);
+#endif
+
 void free_c_can_dev(struct net_device *dev)
 {
        free_candev(dev);
@@ -1133,10 +1245,19 @@ static const struct net_device_ops c_can_netdev_ops = {
 
 int register_c_can_dev(struct net_device *dev)
 {
+       struct c_can_priv *priv = netdev_priv(dev);
+       int err;
+
+       c_can_pm_runtime_enable(priv);
+
        dev->flags |= IFF_ECHO; /* we support local echo */
        dev->netdev_ops = &c_can_netdev_ops;
 
-       return register_candev(dev);
+       err = register_candev(dev);
+       if (err)
+               c_can_pm_runtime_disable(priv);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(register_c_can_dev);
 
@@ -1144,10 +1265,9 @@ void unregister_c_can_dev(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       /* disable all interrupts */
-       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
        unregister_candev(dev);
+
+       c_can_pm_runtime_disable(priv);
 }
 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
 
index 01a7049ab990eacd173c76edecede0b912e14382..e5ed41dafa1b94aa234749064813c587aa79c468 100644 (file)
@@ -24,6 +24,7 @@
 
 enum reg {
        C_CAN_CTRL_REG = 0,
+       C_CAN_CTRL_EX_REG,
        C_CAN_STS_REG,
        C_CAN_ERR_CNT_REG,
        C_CAN_BTR_REG,
@@ -104,6 +105,7 @@ static const u16 reg_map_c_can[] = {
 
 static const u16 reg_map_d_can[] = {
        [C_CAN_CTRL_REG]        = 0x00,
+       [C_CAN_CTRL_EX_REG]     = 0x02,
        [C_CAN_STS_REG]         = 0x04,
        [C_CAN_ERR_CNT_REG]     = 0x08,
        [C_CAN_BTR_REG]         = 0x0C,
@@ -143,8 +145,9 @@ static const u16 reg_map_d_can[] = {
 };
 
 enum c_can_dev_id {
-       C_CAN_DEVTYPE,
-       D_CAN_DEVTYPE,
+       BOSCH_C_CAN_PLATFORM,
+       BOSCH_C_CAN,
+       BOSCH_D_CAN,
 };
 
 /* c_can private data structure */
@@ -152,6 +155,7 @@ struct c_can_priv {
        struct can_priv can;    /* must be the first member */
        struct napi_struct napi;
        struct net_device *dev;
+       struct device *device;
        int tx_object;
        int current_status;
        int last_status;
@@ -164,6 +168,7 @@ struct c_can_priv {
        unsigned int tx_echo;
        void *priv;             /* for board-specific data */
        u16 irqstatus;
+       enum c_can_dev_id type;
 };
 
 struct net_device *alloc_c_can_dev(void);
@@ -171,4 +176,9 @@ void free_c_can_dev(struct net_device *dev);
 int register_c_can_dev(struct net_device *dev);
 void unregister_c_can_dev(struct net_device *dev);
 
+#ifdef CONFIG_PM
+int c_can_power_up(struct net_device *dev);
+int c_can_power_down(struct net_device *dev);
+#endif
+
 #endif /* C_CAN_H */
index 1011146ea51319accbdcf6658d9c22de723e3be7..3d7830bcd2bf83fa55346754f74c2205eae4b3f7 100644 (file)
@@ -120,10 +120,10 @@ static int __devinit c_can_pci_probe(struct pci_dev *pdev,
 
        /* Configure CAN type */
        switch (c_can_pci_data->type) {
-       case C_CAN_DEVTYPE:
+       case BOSCH_C_CAN:
                priv->regs = reg_map_c_can;
                break;
-       case D_CAN_DEVTYPE:
+       case BOSCH_D_CAN:
                priv->regs = reg_map_d_can;
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                break;
@@ -192,7 +192,7 @@ static void __devexit c_can_pci_remove(struct pci_dev *pdev)
 }
 
 static struct c_can_pci_data c_can_sta2x11= {
-       .type = C_CAN_DEVTYPE,
+       .type = BOSCH_C_CAN,
        .reg_align = C_CAN_REG_ALIGN_32,
        .freq = 52000000, /* 52 Mhz */
 };
index 6ff7ad006c300b5a9c499e6bf3465e4cb8b7f2bc..ee1416132aba2e1f9582b7911f67e94eb21b3084 100644 (file)
@@ -30,6 +30,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
 
 #include <linux/can/dev.h>
 
@@ -65,17 +68,58 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
+static struct platform_device_id c_can_id_table[] = {
+       [BOSCH_C_CAN_PLATFORM] = {
+               .name = KBUILD_MODNAME,
+               .driver_data = BOSCH_C_CAN,
+       },
+       [BOSCH_C_CAN] = {
+               .name = "c_can",
+               .driver_data = BOSCH_C_CAN,
+       },
+       [BOSCH_D_CAN] = {
+               .name = "d_can",
+               .driver_data = BOSCH_D_CAN,
+       }, {
+       }
+};
+
+static const struct of_device_id c_can_of_table[] = {
+       { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
+       { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
+       { /* sentinel */ },
+};
+
 static int __devinit c_can_plat_probe(struct platform_device *pdev)
 {
        int ret;
        void __iomem *addr;
        struct net_device *dev;
        struct c_can_priv *priv;
+       const struct of_device_id *match;
        const struct platform_device_id *id;
+       struct pinctrl *pinctrl;
        struct resource *mem;
        int irq;
        struct clk *clk;
 
+       if (pdev->dev.of_node) {
+               match = of_match_device(c_can_of_table, &pdev->dev);
+               if (!match) {
+                       dev_err(&pdev->dev, "Failed to find matching dt id\n");
+                       ret = -EINVAL;
+                       goto exit;
+               }
+               id = match->data;
+       } else {
+               id = platform_get_device_id(pdev);
+       }
+
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl))
+               dev_warn(&pdev->dev,
+                       "failed to configure pins from driver\n");
+
        /* get the appropriate clk */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
@@ -114,9 +158,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
        }
 
        priv = netdev_priv(dev);
-       id = platform_get_device_id(pdev);
        switch (id->driver_data) {
-       case C_CAN_DEVTYPE:
+       case BOSCH_C_CAN:
                priv->regs = reg_map_c_can;
                switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
                case IORESOURCE_MEM_32BIT:
@@ -130,7 +173,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
                        break;
                }
                break;
-       case D_CAN_DEVTYPE:
+       case BOSCH_D_CAN:
                priv->regs = reg_map_d_can;
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
@@ -143,8 +186,10 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
 
        dev->irq = irq;
        priv->base = addr;
+       priv->device = &pdev->dev;
        priv->can.clock.freq = clk_get_rate(clk);
        priv->priv = clk;
+       priv->type = id->driver_data;
 
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
@@ -195,27 +240,75 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct platform_device_id c_can_id_table[] = {
-       {
-               .name = KBUILD_MODNAME,
-               .driver_data = C_CAN_DEVTYPE,
-       }, {
-               .name = "c_can",
-               .driver_data = C_CAN_DEVTYPE,
-       }, {
-               .name = "d_can",
-               .driver_data = D_CAN_DEVTYPE,
-       }, {
+#ifdef CONFIG_PM
+static int c_can_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       int ret;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(ndev);
+
+       if (priv->type != BOSCH_D_CAN) {
+               dev_warn(&pdev->dev, "Not supported\n");
+               return 0;
        }
-};
+
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+       }
+
+       ret = c_can_power_down(ndev);
+       if (ret) {
+               netdev_err(ndev, "failed to enter power down mode\n");
+               return ret;
+       }
+
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       return 0;
+}
+
+static int c_can_resume(struct platform_device *pdev)
+{
+       int ret;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(ndev);
+
+       if (priv->type != BOSCH_D_CAN) {
+               dev_warn(&pdev->dev, "Not supported\n");
+               return 0;
+       }
+
+       ret = c_can_power_up(ndev);
+       if (ret) {
+               netdev_err(ndev, "Still in power down mode\n");
+               return ret;
+       }
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (netif_running(ndev)) {
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
+
+       return 0;
+}
+#else
+#define c_can_suspend NULL
+#define c_can_resume NULL
+#endif
 
 static struct platform_driver c_can_plat_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(c_can_of_table),
        },
        .probe = c_can_plat_probe,
        .remove = __devexit_p(c_can_plat_remove),
+       .suspend = c_can_suspend,
+       .resume = c_can_resume,
        .id_table = c_can_id_table,
 };
 
index c5f143165f80cec199b870a17227a4bd42c3a610..c78ecfca1e4582b8e57a2a42a2e14152f3640483 100644 (file)
 
 #define FLEXCAN_MB_CODE_MASK           (0xf0ffffff)
 
+/* FLEXCAN hardware feature flags */
+#define FLEXCAN_HAS_V10_FEATURES       BIT(1) /* For core version >= 10 */
+#define FLEXCAN_HAS_BROKEN_ERR_STATE   BIT(2) /* Broken error state handling */
+
 /* Structure of the message buffer */
 struct flexcan_mb {
        u32 can_ctrl;
@@ -178,7 +182,7 @@ struct flexcan_regs {
 };
 
 struct flexcan_devtype_data {
-       u32 hw_ver;     /* hardware controller version */
+       u32 features;   /* hardware controller features */
 };
 
 struct flexcan_priv {
@@ -197,11 +201,11 @@ struct flexcan_priv {
 };
 
 static struct flexcan_devtype_data fsl_p1010_devtype_data = {
-       .hw_ver = 3,
+       .features = FLEXCAN_HAS_BROKEN_ERR_STATE,
 };
-
+static struct flexcan_devtype_data fsl_imx28_devtype_data;
 static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
-       .hw_ver = 10,
+       .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -741,15 +745,19 @@ static int flexcan_chip_start(struct net_device *dev)
         * enable tx and rx warning interrupt
         * enable bus off interrupt
         * (== FLEXCAN_CTRL_ERR_STATE)
-        *
-        * _note_: we enable the "error interrupt"
-        * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
-        * warning or bus passive interrupts.
         */
        reg_ctrl = flexcan_read(&regs->ctrl);
        reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
        reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
-               FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;
+               FLEXCAN_CTRL_ERR_STATE;
+       /*
+        * enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
+        * on most Flexcan cores, too. Otherwise we don't get
+        * any error warning or passive interrupts.
+        */
+       if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
+           priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+               reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
 
        /* save for later use */
        priv->reg_ctrl_default = reg_ctrl;
@@ -772,7 +780,7 @@ static int flexcan_chip_start(struct net_device *dev)
        flexcan_write(0x0, &regs->rx14mask);
        flexcan_write(0x0, &regs->rx15mask);
 
-       if (priv->devtype_data->hw_ver >= 10)
+       if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
                flexcan_write(0x0, &regs->rxfgmask);
 
        flexcan_transceiver_switch(priv, 1);
@@ -954,6 +962,7 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
 
 static const struct of_device_id flexcan_of_match[] = {
        { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
        { /* sentinel */ },
 };
index 8a8df82988d13d238e2b7b8448e6b691bc82300a..c975999bb05587e400e635ecd65c7d51c8dd2d24 100644 (file)
@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
 
                if (!clock_name || !strcmp(clock_name, "sys")) {
                        sys_clk = clk_get(&ofdev->dev, "sys_clk");
-                       if (!sys_clk) {
+                       if (IS_ERR(sys_clk)) {
                                dev_err(&ofdev->dev, "couldn't get sys_clk\n");
                                goto exit_unmap;
                        }
@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
 
                if (clocksrc < 0) {
                        ref_clk = clk_get(&ofdev->dev, "ref_clk");
-                       if (!ref_clk) {
+                       if (IS_ERR(ref_clk)) {
                                dev_err(&ofdev->dev, "couldn't get ref_clk\n");
                                goto exit_unmap;
                        }
index 4c4f33d482d2faa44d36bb1cfe669470032eb8b3..25011dbe1b96f15b951ba555477f4ba14be94b0f 100644 (file)
@@ -156,8 +156,13 @@ static void set_normal_mode(struct net_device *dev)
                }
 
                /* set chip to normal mode */
-               priv->write_reg(priv, REG_MOD, 0x00);
+               if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+                       priv->write_reg(priv, REG_MOD, MOD_LOM);
+               else
+                       priv->write_reg(priv, REG_MOD, 0x00);
+
                udelay(10);
+
                status = priv->read_reg(priv, REG_MOD);
        }
 
@@ -310,7 +315,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
 
        can_put_echo_skb(skb, dev, 0);
 
-       sja1000_write_cmdreg(priv, CMD_TR);
+       if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+               sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
+       else
+               sja1000_write_cmdreg(priv, CMD_TR);
 
        return NETDEV_TX_OK;
 }
@@ -505,10 +513,18 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
                        netdev_warn(dev, "wakeup interrupt\n");
 
                if (isrc & IRQ_TI) {
-                       /* transmission complete interrupt */
-                       stats->tx_bytes += priv->read_reg(priv, REG_FI) & 0xf;
-                       stats->tx_packets++;
-                       can_get_echo_skb(dev, 0);
+                       /* transmission buffer released */
+                       if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
+                           !(status & SR_TCS)) {
+                               stats->tx_errors++;
+                               can_free_echo_skb(dev, 0);
+                       } else {
+                               /* transmission complete */
+                               stats->tx_bytes +=
+                                       priv->read_reg(priv, REG_FI) & 0xf;
+                               stats->tx_packets++;
+                               can_get_echo_skb(dev, 0);
+                       }
                        netif_wake_queue(dev);
                }
                if (isrc & IRQ_RI) {
@@ -605,7 +621,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
        priv->can.do_set_mode = sja1000_set_mode;
        priv->can.do_get_berr_counter = sja1000_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-               CAN_CTRLMODE_BERR_REPORTING;
+               CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
+               CAN_CTRLMODE_ONE_SHOT;
 
        spin_lock_init(&priv->cmdreg_lock);
 
index d2f91f737871889e59f83b227abed4751f04c171..c4643c400d462bfc63ce7b4df6c76558e4e92e50 100644 (file)
@@ -53,7 +53,7 @@ static struct peak_usb_adapter *peak_usb_adapters_list[] = {
  * dump memory
  */
 #define DUMP_WIDTH     16
-void dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(char *prompt, void *p, int l)
 {
        pr_info("%s dumping %s (%d bytes):\n",
                PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -203,9 +203,9 @@ static void peak_usb_read_bulk_callback(struct urb *urb)
                if (dev->state & PCAN_USB_STATE_STARTED) {
                        err = dev->adapter->dev_decode_buf(dev, urb);
                        if (err)
-                               dump_mem("received usb message",
-                                       urb->transfer_buffer,
-                                       urb->transfer_buffer_length);
+                               pcan_dump_mem("received usb message",
+                                             urb->transfer_buffer,
+                                             urb->transfer_buffer_length);
                }
        }
 
index 4c775b620be287b9a08eac5ed27a8d390f1930ba..c8e5e91d7cb571f350eef5ff2b0e26cf317d6460 100644 (file)
@@ -131,7 +131,7 @@ struct peak_usb_device {
        struct peak_usb_device *next_siblings;
 };
 
-void dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(char *prompt, void *p, int l);
 
 /* common timestamp management */
 void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
index 629c4ba5d49d95792717f75b3a8919543a60b5d0..e1626d92511adc88d084345f7fdbf098a4aafb09 100644 (file)
@@ -292,8 +292,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
                        if (!rec_len) {
                                netdev_err(dev->netdev,
                                           "got unprocessed record in msg\n");
-                               dump_mem("rcvd rsp msg", pum->u.rec_buffer,
-                                        actual_length);
+                               pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer,
+                                             actual_length);
                                break;
                        }
 
@@ -756,8 +756,8 @@ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb)
 
 fail:
        if (err)
-               dump_mem("received msg",
-                        urb->transfer_buffer, urb->actual_length);
+               pcan_dump_mem("received msg",
+                             urb->transfer_buffer, urb->actual_length);
 
        return err;
 }
index a11af5cc484477283ccb624df88bf072fc2f8088..e4ff38949112d8f245df2e481c28ba0ac7ede17c 100644 (file)
@@ -89,15 +89,6 @@ source "drivers/net/ethernet/marvell/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
-
-config MIPS_SIM_NET
-       tristate "MIPS simulator Network device"
-       depends on MIPS_SIM
-       ---help---
-         The MIPSNET device is a simple Ethernet network device which is
-         emulated by the MIPS Simulator.
-         If you are not using a MIPSsim or are unsure, say N.
-
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
index 878ad32b93f21c8fd8191c152c721c409bd81d25..d4473072654abcf0da782651a76c8d036f1fca9a 100644 (file)
@@ -40,7 +40,6 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
-obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
index f15e72e81ac4db2c1fa623c8f09a9820e59e9408..4bd416b72e65a9a5423930e9a52340bb087f2174 100644 (file)
@@ -101,6 +101,7 @@ config TIGON3
        tristate "Broadcom Tigon3 support"
        depends on PCI
        select PHYLIB
+       select HWMON
        ---help---
          This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
 
index eac25236856c85b174cf5c124dab5612d3bb68ab..72897c47b8c849c31cf387c6cd63f42d624be07c 100644 (file)
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.72.51-0"
-#define DRV_MODULE_RELDATE      "2012/06/18"
+#define DRV_MODULE_VERSION      "1.78.00-0"
+#define DRV_MODULE_RELDATE      "2012/09/27"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
index e8e97a7d1d06df9a209c741406af32c19a1313e2..30f04a389227bbfcb9cdaf29b563c005afbfa26a 100644 (file)
@@ -2285,7 +2285,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* Wait for all pending SP commands to complete */
        if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
-               bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+               bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
                return -EBUSY;
        }
 
@@ -2333,7 +2333,7 @@ load_error0:
 }
 
 /* must be called with rtnl_lock */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 {
        int i;
        bool global = false;
@@ -2395,7 +2395,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
        /* Cleanup the chip if needed */
        if (unload_mode != UNLOAD_RECOVERY)
-               bnx2x_chip_cleanup(bp, unload_mode);
+               bnx2x_chip_cleanup(bp, unload_mode, keep_link);
        else {
                /* Send the UNLOAD_REQUEST to the MCP */
                bnx2x_send_unload_req(bp, unload_mode);
@@ -2419,7 +2419,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
                bnx2x_free_irq(bp);
 
                /* Report UNLOAD_DONE to MCP */
-               bnx2x_send_unload_done(bp);
+               bnx2x_send_unload_done(bp, false);
        }
 
        /*
@@ -3026,8 +3026,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        first_bd = tx_start_bd;
 
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
-                mac_type);
+       SET_FLAG(tx_start_bd->general_data,
+                ETH_TX_START_BD_PARSE_NBDS,
+                0);
 
        /* header nbd */
        SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
@@ -3077,13 +3078,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                              &pbd_e2->dst_mac_addr_lo,
                                              eth->h_dest);
                }
+
+               SET_FLAG(pbd_e2_parsing_data,
+                        ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
        } else {
+               u16 global_data = 0;
                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
                /* Set PBD in checksum offload case */
                if (xmit_type & XMIT_CSUM)
                        hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
 
+               SET_FLAG(global_data,
+                        ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
+               pbd_e1x->global_data |= cpu_to_le16(global_data);
        }
 
        /* Setup the data pointer of the first BD of the packet */
@@ -3770,7 +3778,7 @@ int bnx2x_reload_if_running(struct net_device *dev)
        if (unlikely(!netif_running(dev)))
                return 0;
 
-       bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
        return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
@@ -3967,7 +3975,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
        netif_device_detach(dev);
 
-       bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+       bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 
        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 
index dfd86a55f1dcab583ad08342a21eaae9a97be53f..9c5ea6c5b4c7597059644c57d27f2943cb65b792 100644 (file)
@@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
  *
  * @bp:                driver handle
+ * @keep_link:         true iff link should be kept up
  */
-void bnx2x_send_unload_done(struct bnx2x *bp);
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
 
 /**
  * bnx2x_config_rss_pf - configure RSS parameters in a PF.
@@ -152,6 +153,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
  */
 void bnx2x_link_set(struct bnx2x *bp);
 
+/**
+ * bnx2x_force_link_reset - Forces link reset, and puts the PHY
+ * in reset as well.
+ *
+ * @bp:                driver handle
+ */
+void bnx2x_force_link_reset(struct bnx2x *bp);
+
+
 /**
  * bnx2x_link_test - query link status.
  *
@@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp);
  *
  * @bp:                        driver handle
  * @unload_mode:       COMMON, PORT, FUNCTION
+ * @keep_link:         true iff link should be kept up.
  *
  * - Cleanup MAC configuration.
  * - Closes clients.
  * - etc.
  */
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
 
 /**
  * bnx2x_acquire_hw_lock - acquire HW lock.
@@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
 
 /* dev_close main block */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
 
 /* dev_open main block */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
index 8a73374e52a763ba16d8526a2d3f1aa8b80fb11e..2245c3895409d149c402fa70bdff529790046e28 100644 (file)
@@ -91,25 +91,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
        /*
         * Rx COS configuration
         * Changing PFC RX configuration .
-        * In RX COS0 will always be configured to lossy and COS1 to lossless
+        * In RX COS0 will always be configured to lossless and COS1 to lossy
         */
        for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
                pri_bit = 1 << i;
 
-               if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+               if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
                        val |= 1 << (i * 4);
        }
 
        pfc_params.pkt_priority_to_cos = val;
 
        /* RX COS0 */
-       pfc_params.llfc_low_priority_classes = 0;
+       pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
        /* RX COS1 */
-       pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
-
-       /* BRB configuration */
-       pfc_params.cos0_pauseable = false;
-       pfc_params.cos1_pauseable = true;
+       pfc_params.llfc_high_priority_classes = 0;
 
        bnx2x_acquire_phy_lock(bp);
        bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
index ebf40cd7aa1050d716683e806eda505050bf1e40..c65295dded39aa5b1965a9dd07e00974863ca297 100644 (file)
@@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev)
 
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_force_link_reset(bp);
                bnx2x_link_set(bp);
        }
 
@@ -1606,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
        return 0;
 }
 
-static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
        "register_test (offline)    ",
        "memory_test (offline)      ",
        "int_loopback_test (offline)",
@@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
                return -EOPNOTSUPP;
        }
 
-       eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+       eee_cfg = bp->link_vars.eee_status;
 
        edata->supported =
                bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
@@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
                return -EOPNOTSUPP;
        }
 
-       eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+       eee_cfg = bp->link_vars.eee_status;
 
        if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
                DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
@@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+       /* Restart link to propagate changes */
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_force_link_reset(bp);
                bnx2x_link_set(bp);
        }
 
@@ -2038,8 +2040,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        u16 pkt_prod, bd_prod;
        struct sw_tx_bd *tx_buf;
        struct eth_tx_start_bd *tx_start_bd;
-       struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
-       struct eth_tx_parse_bd_e2  *pbd_e2 = NULL;
        dma_addr_t mapping;
        union eth_rx_cqe *cqe;
        u8 cqe_fp_flags, cqe_fp_type;
@@ -2130,22 +2130,33 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       SET_FLAG(tx_start_bd->general_data,
-                ETH_TX_START_BD_ETH_ADDR_TYPE,
-                UNICAST_ADDRESS);
        SET_FLAG(tx_start_bd->general_data,
                 ETH_TX_START_BD_HDR_NBDS,
                 1);
+       SET_FLAG(tx_start_bd->general_data,
+                ETH_TX_START_BD_PARSE_NBDS,
+                0);
 
        /* turn on parsing and get a BD */
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
-       pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
-       pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
-
-       memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
-       memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
-
+       if (CHIP_IS_E1x(bp)) {
+               u16 global_data = 0;
+               struct eth_tx_parse_bd_e1x  *pbd_e1x =
+                       &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+               memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+               SET_FLAG(global_data,
+                        ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+               pbd_e1x->global_data = cpu_to_le16(global_data);
+       } else {
+               u32 parsing_data = 0;
+               struct eth_tx_parse_bd_e2  *pbd_e2 =
+                       &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+               memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+               SET_FLAG(parsing_data,
+                        ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+               pbd_e2->parsing_data = cpu_to_le32(parsing_data);
+       }
        wmb();
 
        txdata->tx_db.data.prod += 2;
@@ -2263,7 +2274,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
        if (!netif_running(bp->dev))
                return BNX2X_EXT_LOOPBACK_FAILED;
 
-       bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
        rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
        if (rc) {
                DP(BNX2X_MSG_ETHTOOL,
@@ -2414,7 +2425,7 @@ static void bnx2x_self_test(struct net_device *dev,
 
                link_up = bp->link_vars.link_up;
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
                rc = bnx2x_nic_load(bp, LOAD_DIAG);
                if (rc) {
                        etest->flags |= ETH_TEST_FL_FAILED;
@@ -2446,7 +2457,7 @@ static void bnx2x_self_test(struct net_device *dev,
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
                }
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
 
                /* restore input for TX port IF */
                REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
@@ -2534,7 +2545,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       int i, j, k, offset, start;
+       int i, j, k, start;
        char queue_name[MAX_QUEUE_NAME_LEN+1];
 
        switch (stringset) {
@@ -2570,13 +2581,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        start = 0;
                else
                        start = 4;
-               for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
-                    i++, j++) {
-                       offset = sprintf(buf+32*i, "%s",
-                                        bnx2x_tests_str_arr[j]);
-                       *(buf+offset) = '\0';
-               }
-               break;
+               memcpy(buf, bnx2x_tests_str_arr + start,
+                      ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
        }
 }
 
@@ -2940,7 +2946,7 @@ static int bnx2x_set_channels(struct net_device *dev,
                bnx2x_change_num_queues(bp, channels->combined_count);
                return 0;
        }
-       bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
        bnx2x_change_num_queues(bp, channels->combined_count);
        return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
index bbc66ced9c25af262f4884de5549c20659f9bc6e..620fe939ecfd357ed1e852bd0350c3687886a8b5 100644 (file)
@@ -88,9 +88,6 @@
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET        (IRO[102].base)
 #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
        (IRO[101].base + ((assertListEntry) * IRO[101].m1))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
-#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
-       (IRO[108].base)
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
        (IRO[201].base + ((pfId) * IRO[201].m1))
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
index 76b6e65790f8f5c54dab782a84d2c20c2393f381..18704929e6422ec15f0c59ec8c4abcee8802e532 100644 (file)
@@ -1286,6 +1286,9 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK         0x00ff0000
        #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK         0xff000000
 
+       #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET     0x00000002
+
+       #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA          0x0000100a
        u32 fw_mb_header;
        #define FW_MSG_CODE_MASK                        0xffff0000
        #define FW_MSG_CODE_DRV_LOAD_COMMON             0x10100000
@@ -1909,6 +1912,54 @@ struct lldp_local_mib {
 };
 /***END OF DCBX STRUCTURES DECLARATIONS***/
 
+/***********************************************************/
+/*                         Elink section                   */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+       u32 req_duplex;
+       #define REQ_DUPLEX_PHY0_MASK        0x0000ffff
+       #define REQ_DUPLEX_PHY0_SHIFT       0
+       #define REQ_DUPLEX_PHY1_MASK        0xffff0000
+       #define REQ_DUPLEX_PHY1_SHIFT       16
+       u32 req_flow_ctrl;
+       #define REQ_FLOW_CTRL_PHY0_MASK     0x0000ffff
+       #define REQ_FLOW_CTRL_PHY0_SHIFT    0
+       #define REQ_FLOW_CTRL_PHY1_MASK     0xffff0000
+       #define REQ_FLOW_CTRL_PHY1_SHIFT    16
+       u32 req_line_speed; /* Also determine AutoNeg */
+       #define REQ_LINE_SPD_PHY0_MASK      0x0000ffff
+       #define REQ_LINE_SPD_PHY0_SHIFT     0
+       #define REQ_LINE_SPD_PHY1_MASK      0xffff0000
+       #define REQ_LINE_SPD_PHY1_SHIFT     16
+       u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+       u32 additional_config;
+       #define REQ_FC_AUTO_ADV_MASK        0x0000ffff
+       #define REQ_FC_AUTO_ADV0_SHIFT      0
+       #define NO_LFA_DUE_TO_DCC_MASK      0x00010000
+       u32 lfa_sts;
+       #define LFA_LINK_FLAP_REASON_OFFSET             0
+       #define LFA_LINK_FLAP_REASON_MASK               0x000000ff
+               #define LFA_LINK_DOWN                       0x1
+               #define LFA_LOOPBACK_ENABLED            0x2
+               #define LFA_DUPLEX_MISMATCH                 0x3
+               #define LFA_MFW_IS_TOO_OLD                  0x4
+               #define LFA_LINK_SPEED_MISMATCH         0x5
+               #define LFA_FLOW_CTRL_MISMATCH          0x6
+               #define LFA_SPEED_CAP_MISMATCH          0x7
+               #define LFA_DCC_LFA_DISABLED            0x8
+               #define LFA_EEE_MISMATCH                0x9
+
+       #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET        8
+       #define LINK_FLAP_AVOIDANCE_COUNT_MASK          0x0000ff00
+
+       #define LINK_FLAP_COUNT_OFFSET                  16
+       #define LINK_FLAP_COUNT_MASK                    0x00ff0000
+
+       #define LFA_FLAGS_MASK                          0xff000000
+       #define SHMEM_LFA_DONT_CLEAR_STAT               (1<<24)
+};
+
 struct ncsi_oem_fcoe_features {
        u32 fcoe_features1;
        #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
@@ -2738,8 +2789,8 @@ struct afex_stats {
 };
 
 #define BCM_5710_FW_MAJOR_VERSION                      7
-#define BCM_5710_FW_MINOR_VERSION                      2
-#define BCM_5710_FW_REVISION_VERSION                   51
+#define BCM_5710_FW_MINOR_VERSION                      8
+#define BCM_5710_FW_REVISION_VERSION           2
 #define BCM_5710_FW_ENGINEERING_VERSION                        0
 #define BCM_5710_FW_COMPILE_FLAGS                      1
 
@@ -3861,10 +3912,8 @@ struct eth_rss_update_ramrod_data {
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
        u8 rss_result_mask;
        u8 rss_mode;
        __le32 __reserved2;
@@ -4080,27 +4129,29 @@ struct eth_tx_start_bd {
 #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
 #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
-#define ETH_TX_START_BD_RESREVED (0x1<<5)
-#define ETH_TX_START_BD_RESREVED_SHIFT 5
-#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
-#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_RESREVED (0x1<<7)
+#define ETH_TX_START_BD_RESREVED_SHIFT 7
 };
 
 /*
  * Tx parsing BD structure for ETH E1/E1h
  */
 struct eth_tx_parse_bd_e1x {
-       u8 global_data;
+       __le16 global_data;
 #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
 #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
-#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
-#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
        u8 tcp_flags;
 #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
 #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
@@ -4119,7 +4170,6 @@ struct eth_tx_parse_bd_e1x {
 #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
 #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
        u8 ip_hlen_w;
-       s8 reserved;
        __le16 total_hlen_w;
        __le16 tcp_pseudo_csum;
        __le16 lso_mss;
@@ -4138,14 +4188,16 @@ struct eth_tx_parse_bd_e2 {
        __le16 src_mac_addr_mid;
        __le16 src_mac_addr_hi;
        __le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
 #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
-#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
-#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
 };
 
 /*
@@ -4913,7 +4965,8 @@ struct flow_control_configuration {
  *
  */
 struct function_start_data {
-       __le16 function_mode;
+       u8 function_mode;
+       u8 reserved;
        __le16 sd_vlan_tag;
        __le16 vif_id;
        u8 path_id;
index 559c396d45cce465ae77999fa0d22508b6d60624..c8f10f0e8a0dea6db58e412989f2d35eddc6528d 100644 (file)
@@ -566,7 +566,7 @@ static const struct {
                u32 e2;         /* 57712 */
                u32 e3;         /* 578xx */
        } reg_mask;             /* Register mask (all valid bits) */
-       char name[7];           /* Block's longest name is 6 characters long
+       char name[8];           /* Block's longest name is 7 characters long
                                 * (name + suffix)
                                 */
 } bnx2x_blocks_parity_data[] = {
index b046beb435b2c490f70ef3bc2f67bf3af16bfcb5..e2e45ee5df33fcc75c491c78643dfe406297aabb 100644 (file)
 #define EDC_MODE_LIMITING                              0x0044
 #define EDC_MODE_PASSIVE_DAC                   0x0055
 
-/* BRB default for class 0 E2 */
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR     170
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR              250
-#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR              10
-#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR               50
-
-/* BRB thresholds for E2*/
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE            170
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE                0
-
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE             250
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE         0
-
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE             10
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE         90
-
-#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE                      50
-#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE          250
-
-/* BRB default for class 0 E3A0 */
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR   290
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR    410
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR    10
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR     50
-
-/* BRB thresholds for E3A0 */
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE          290
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE              0
-
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE           410
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE               0
-
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE           10
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE               170
-
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE            50
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE                410
-
-/* BRB default for E3B0 */
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR   330
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR    490
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR    15
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR     55
-
-/* BRB thresholds for E3B0 2 port mode*/
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE               1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE   0
-
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE                1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE    0
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE                10
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE    1025
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE         50
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE     1025
-
-/* only for E3B0*/
-#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR                       1025
-#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR                        1025
-
-/* Lossy +Lossless GUARANTIED == GUART */
-#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART                 284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_LB_GUART                     236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_LB_GUART                 342
-
-/* Lossy +Lossless*/
-#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART              284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART          236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART              336
-#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST               80
-
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART            0
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST               0
-
-/* BRB thresholds for E3B0 4 port mode */
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE               304
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE   0
-
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE                384
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE    0
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE                10
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE    304
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE         50
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE     384
-
-/* only for E3B0*/
-#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR                       304
-#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR                        384
-#define PFC_E3B0_4P_LB_GUART           120
-
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART            120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST       80
-
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART            80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST       120
-
-/* Pause defines*/
-#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR                      330
-#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR                       490
-#define DEFAULT_E3B0_LB_GUART          40
-
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART           40
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST      0
-
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART           40
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST      0
-
 /* ETS defines*/
 #define DCBX_INVALID_COS                                       (0xFF)
 
@@ -321,6 +207,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
        return val;
 }
 
+/*
+ * bnx2x_check_lfa - This function checks if link reinitialization is required,
+ *                   or link flap can be avoided.
+ *
+ * @params:    link parameters
+ * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed
+ *         condition code.
+ */
+static int bnx2x_check_lfa(struct link_params *params)
+{
+       u32 link_status, cfg_idx, lfa_mask, cfg_size;
+       u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+       u32 saved_val, req_val, eee_status;
+       struct bnx2x *bp = params->bp;
+
+       additional_config =
+               REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, additional_config));
+
+       /* NOTE: must be first condition checked -
+       * to verify DCC bit is cleared in any case!
+       */
+       if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+               DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
+               REG_WR(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, additional_config),
+                      additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+               return LFA_DCC_LFA_DISABLED;
+       }
+
+       /* Verify that link is up */
+       link_status = REG_RD(bp, params->shmem_base +
+                            offsetof(struct shmem_region,
+                                     port_mb[params->port].link_status));
+       if (!(link_status & LINK_STATUS_LINK_UP))
+               return LFA_LINK_DOWN;
+
+       /* Verify that loopback mode is not set */
+       if (params->loopback_mode)
+               return LFA_LOOPBACK_ENABLED;
+
+       /* Verify that MFW supports LFA */
+       if (!params->lfa_base)
+               return LFA_MFW_IS_TOO_OLD;
+
+       if (params->num_phys == 3) {
+               cfg_size = 2;
+               lfa_mask = 0xffffffff;
+       } else {
+               cfg_size = 1;
+               lfa_mask = 0xffff;
+       }
+
+       /* Compare Duplex */
+       saved_val = REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, req_duplex));
+       req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+       if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+               DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
+                              (saved_val & lfa_mask), (req_val & lfa_mask));
+               return LFA_DUPLEX_MISMATCH;
+       }
+       /* Compare Flow Control */
+       saved_val = REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, req_flow_ctrl));
+       req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+       if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+               DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
+                              (saved_val & lfa_mask), (req_val & lfa_mask));
+               return LFA_FLOW_CTRL_MISMATCH;
+       }
+       /* Compare Link Speed */
+       saved_val = REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, req_line_speed));
+       req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+       if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+               DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
+                              (saved_val & lfa_mask), (req_val & lfa_mask));
+               return LFA_LINK_SPEED_MISMATCH;
+       }
+
+       for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+               cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
+                                           offsetof(struct shmem_lfa,
+                                                    speed_cap_mask[cfg_idx]));
+
+               if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+                       DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
+                                      cur_speed_cap_mask,
+                                      params->speed_cap_mask[cfg_idx]);
+                       return LFA_SPEED_CAP_MISMATCH;
+               }
+       }
+
+       cur_req_fc_auto_adv =
+               REG_RD(bp, params->lfa_base +
+                      offsetof(struct shmem_lfa, additional_config)) &
+               REQ_FC_AUTO_ADV_MASK;
+
+       if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+               DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
+                              cur_req_fc_auto_adv, params->req_fc_auto_adv);
+               return LFA_FLOW_CTRL_MISMATCH;
+       }
+
+       eee_status = REG_RD(bp, params->shmem2_base +
+                           offsetof(struct shmem2_region,
+                                    eee_status[params->port]));
+
+       if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+            (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
+           ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+            (params->eee_mode & EEE_MODE_ADV_LPI))) {
+               DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
+                              eee_status);
+               return LFA_EEE_MISMATCH;
+       }
+
+       /* LFA conditions are met */
+       return 0;
+}
 /******************************************************************/
 /*                     EPIO/GPIO section                         */
 /******************************************************************/
@@ -1306,93 +1313,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        return 0;
 }
 
-/******************************************************************/
-/*                     EEE section                                */
-/******************************************************************/
-static u8 bnx2x_eee_has_cap(struct link_params *params)
-{
-       struct bnx2x *bp = params->bp;
-
-       if (REG_RD(bp, params->shmem2_base) <=
-                  offsetof(struct shmem2_region, eee_status[params->port]))
-               return 0;
-
-       return 1;
-}
-
-static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
-{
-       switch (nvram_mode) {
-       case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
-               *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
-               break;
-       case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
-               *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
-               break;
-       case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
-               *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
-               break;
-       default:
-               *idle_timer = 0;
-               break;
-       }
-
-       return 0;
-}
-
-static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
-{
-       switch (idle_timer) {
-       case EEE_MODE_NVRAM_BALANCED_TIME:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
-               break;
-       case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
-               break;
-       case EEE_MODE_NVRAM_LATENCY_TIME:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
-               break;
-       default:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
-               break;
-       }
-
-       return 0;
-}
-
-static u32 bnx2x_eee_calc_timer(struct link_params *params)
-{
-       u32 eee_mode, eee_idle;
-       struct bnx2x *bp = params->bp;
-
-       if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
-               if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
-                       /* time value in eee_mode --> used directly*/
-                       eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
-               } else {
-                       /* hsi value in eee_mode --> time */
-                       if (bnx2x_eee_nvram_to_time(params->eee_mode &
-                                                   EEE_MODE_NVRAM_MASK,
-                                                   &eee_idle))
-                               return 0;
-               }
-       } else {
-               /* hsi values in nvram --> time*/
-               eee_mode = ((REG_RD(bp, params->shmem_base +
-                                   offsetof(struct shmem_region, dev_info.
-                                   port_feature_config[params->port].
-                                   eee_power_mode)) &
-                            PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
-                           PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
-
-               if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
-                       return 0;
-       }
-
-       return eee_idle;
-}
-
-
 /******************************************************************/
 /*                     PFC section                               */
 /******************************************************************/
@@ -1606,16 +1526,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
               NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
 }
 
-static void bnx2x_umac_disable(struct link_params *params)
+static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
 {
        u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+       u32 val;
        struct bnx2x *bp = params->bp;
        if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
                   (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
                return;
-
+       val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+       if (en)
+               val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+                       UMAC_COMMAND_CONFIG_REG_RX_ENA);
+       else
+               val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+                        UMAC_COMMAND_CONFIG_REG_RX_ENA);
        /* Disable RX and TX */
-       REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0);
+       REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 }
 
 static void bnx2x_umac_enable(struct link_params *params,
@@ -1671,6 +1598,16 @@ static void bnx2x_umac_enable(struct link_params *params,
        REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
        udelay(50);
 
+       /* Configure UMAC for EEE */
+       if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+               DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
+               REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+                      UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+               REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+       } else {
+               REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+       }
+
        /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
        REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
               ((params->mac_addr[2] << 24) |
@@ -1766,11 +1703,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
 
 }
 
-static void bnx2x_xmac_disable(struct link_params *params)
+static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
 {
        u8 port = params->port;
        struct bnx2x *bp = params->bp;
        u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+       u32 val;
 
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
            MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -1784,7 +1722,12 @@ static void bnx2x_xmac_disable(struct link_params *params)
                REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
                       (pfc_ctrl | (1<<1)));
                DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
-               REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+               val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+               if (en)
+                       val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+               else
+                       val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+               REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
        }
 }
 
@@ -2087,391 +2030,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-/* PFC BRB internal port configuration params */
-struct bnx2x_pfc_brb_threshold_val {
-       u32 pause_xoff;
-       u32 pause_xon;
-       u32 full_xoff;
-       u32 full_xon;
-};
-
-struct bnx2x_pfc_brb_e3b0_val {
-       u32 per_class_guaranty_mode;
-       u32 lb_guarantied_hyst;
-       u32 full_lb_xoff_th;
-       u32 full_lb_xon_threshold;
-       u32 lb_guarantied;
-       u32 mac_0_class_t_guarantied;
-       u32 mac_0_class_t_guarantied_hyst;
-       u32 mac_1_class_t_guarantied;
-       u32 mac_1_class_t_guarantied_hyst;
-};
-
-struct bnx2x_pfc_brb_th_val {
-       struct bnx2x_pfc_brb_threshold_val pauseable_th;
-       struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
-       struct bnx2x_pfc_brb_threshold_val default_class0;
-       struct bnx2x_pfc_brb_threshold_val default_class1;
-
-};
-static int bnx2x_pfc_brb_get_config_params(
-                               struct link_params *params,
-                               struct bnx2x_pfc_brb_th_val *config_val)
-{
-       struct bnx2x *bp = params->bp;
-       DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
-
-       config_val->default_class1.pause_xoff = 0;
-       config_val->default_class1.pause_xon = 0;
-       config_val->default_class1.full_xoff = 0;
-       config_val->default_class1.full_xon = 0;
-
-       if (CHIP_IS_E2(bp)) {
-               /* Class0 defaults */
-               config_val->default_class0.pause_xoff =
-                       DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
-               config_val->default_class0.pause_xon =
-                       DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
-               config_val->default_class0.full_xoff =
-                       DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
-               config_val->default_class0.full_xon =
-                       DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
-               /* Pause able*/
-               config_val->pauseable_th.pause_xoff =
-                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-               config_val->pauseable_th.pause_xon =
-                       PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
-               config_val->pauseable_th.full_xoff =
-                       PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
-               config_val->pauseable_th.full_xon =
-                       PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
-               /* Non pause able*/
-               config_val->non_pauseable_th.pause_xoff =
-                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.pause_xon =
-                       PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xoff =
-                       PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xon =
-                       PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-       } else if (CHIP_IS_E3A0(bp)) {
-               /* Class0 defaults */
-               config_val->default_class0.pause_xoff =
-                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
-               config_val->default_class0.pause_xon =
-                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
-               config_val->default_class0.full_xoff =
-                       DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
-               config_val->default_class0.full_xon =
-                       DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
-               /* Pause able */
-               config_val->pauseable_th.pause_xoff =
-                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-               config_val->pauseable_th.pause_xon =
-                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
-               config_val->pauseable_th.full_xoff =
-                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
-               config_val->pauseable_th.full_xon =
-                       PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
-               /* Non pause able*/
-               config_val->non_pauseable_th.pause_xoff =
-                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.pause_xon =
-                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xoff =
-                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xon =
-                       PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-       } else if (CHIP_IS_E3B0(bp)) {
-               /* Class0 defaults */
-               config_val->default_class0.pause_xoff =
-                       DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
-               config_val->default_class0.pause_xon =
-                   DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
-               config_val->default_class0.full_xoff =
-                   DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
-               config_val->default_class0.full_xon =
-                   DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
-
-               if (params->phy[INT_PHY].flags &
-                   FLAGS_4_PORT_MODE) {
-                       config_val->pauseable_th.pause_xoff =
-                               PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.pause_xon =
-                               PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-                       config_val->pauseable_th.full_xoff =
-                               PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.full_xon =
-                               PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
-                       /* Non pause able*/
-                       config_val->non_pauseable_th.pause_xoff =
-                       PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.pause_xon =
-                       PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xoff =
-                       PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xon =
-                       PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-               } else {
-                       config_val->pauseable_th.pause_xoff =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.pause_xon =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-                       config_val->pauseable_th.full_xoff =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.full_xon =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
-                       /* Non pause able*/
-                       config_val->non_pauseable_th.pause_xoff =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.pause_xon =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xoff =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xon =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-               }
-       } else
-           return -EINVAL;
-
-       return 0;
-}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(
-               struct link_params *params,
-               struct bnx2x_pfc_brb_e3b0_val
-               *e3b0_val,
-               struct bnx2x_nig_brb_pfc_port_params *pfc_params,
-               const u8 pfc_enabled)
-{
-       if (pfc_enabled && pfc_params) {
-               e3b0_val->per_class_guaranty_mode = 1;
-               e3b0_val->lb_guarantied_hyst = 80;
-
-               if (params->phy[INT_PHY].flags &
-                   FLAGS_4_PORT_MODE) {
-                       e3b0_val->full_lb_xoff_th =
-                               PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
-                       e3b0_val->full_lb_xon_threshold =
-                               PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
-                       e3b0_val->lb_guarantied =
-                               PFC_E3B0_4P_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
-                       e3b0_val->mac_0_class_t_guarantied_hyst =
-                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
-                       e3b0_val->mac_1_class_t_guarantied =
-                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
-                       e3b0_val->mac_1_class_t_guarantied_hyst =
-                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
-               } else {
-                       e3b0_val->full_lb_xoff_th =
-                               PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
-                       e3b0_val->full_lb_xon_threshold =
-                               PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
-                       e3b0_val->mac_0_class_t_guarantied_hyst =
-                               PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
-                       e3b0_val->mac_1_class_t_guarantied =
-                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
-                       e3b0_val->mac_1_class_t_guarantied_hyst =
-                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
-                       if (pfc_params->cos0_pauseable !=
-                               pfc_params->cos1_pauseable) {
-                               /* Nonpauseable= Lossy + pauseable = Lossless*/
-                               e3b0_val->lb_guarantied =
-                                       PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
-                               e3b0_val->mac_0_class_t_guarantied =
-                              PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
-                       } else if (pfc_params->cos0_pauseable) {
-                               /* Lossless +Lossless*/
-                               e3b0_val->lb_guarantied =
-                                       PFC_E3B0_2P_PAUSE_LB_GUART;
-                               e3b0_val->mac_0_class_t_guarantied =
-                                  PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
-                       } else {
-                               /* Lossy +Lossy*/
-                               e3b0_val->lb_guarantied =
-                                       PFC_E3B0_2P_NON_PAUSE_LB_GUART;
-                               e3b0_val->mac_0_class_t_guarantied =
-                              PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
-                       }
-               }
-       } else {
-               e3b0_val->per_class_guaranty_mode = 0;
-               e3b0_val->lb_guarantied_hyst = 0;
-               e3b0_val->full_lb_xoff_th =
-                       DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
-               e3b0_val->full_lb_xon_threshold =
-                       DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
-               e3b0_val->lb_guarantied =
-                       DEFAULT_E3B0_LB_GUART;
-               e3b0_val->mac_0_class_t_guarantied =
-                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
-               e3b0_val->mac_0_class_t_guarantied_hyst =
-                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
-               e3b0_val->mac_1_class_t_guarantied =
-                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
-               e3b0_val->mac_1_class_t_guarantied_hyst =
-                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
-       }
-}
-static int bnx2x_update_pfc_brb(struct link_params *params,
-                               struct link_vars *vars,
-                               struct bnx2x_nig_brb_pfc_port_params
-                               *pfc_params)
-{
-       struct bnx2x *bp = params->bp;
-       struct bnx2x_pfc_brb_th_val config_val = { {0} };
-       struct bnx2x_pfc_brb_threshold_val *reg_th_config =
-               &config_val.pauseable_th;
-       struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
-       const int set_pfc = params->feature_config_flags &
-               FEATURE_CONFIG_PFC_ENABLED;
-       const u8 pfc_enabled = (set_pfc && pfc_params);
-       int bnx2x_status = 0;
-       u8 port = params->port;
-
-       /* default - pause configuration */
-       reg_th_config = &config_val.pauseable_th;
-       bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
-       if (bnx2x_status)
-               return bnx2x_status;
-
-       if (pfc_enabled) {
-               /* First COS */
-               if (pfc_params->cos0_pauseable)
-                       reg_th_config = &config_val.pauseable_th;
-               else
-                       reg_th_config = &config_val.non_pauseable_th;
-       } else
-               reg_th_config = &config_val.default_class0;
-       /* The number of free blocks below which the pause signal to class 0
-        * of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
-              BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
-              reg_th_config->pause_xoff);
-       /* The number of free blocks above which the pause signal to class 0
-        * of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
-              BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
-       /* The number of free blocks below which the full signal to class 0
-        * of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
-              BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
-       /* The number of free blocks above which the full signal to class 0
-        * of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
-              BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
-
-       if (pfc_enabled) {
-               /* Second COS */
-               if (pfc_params->cos1_pauseable)
-                       reg_th_config = &config_val.pauseable_th;
-               else
-                       reg_th_config = &config_val.non_pauseable_th;
-       } else
-               reg_th_config = &config_val.default_class1;
-       /* The number of free blocks below which the pause signal to
-        * class 1 of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
-              BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
-              reg_th_config->pause_xoff);
-
-       /* The number of free blocks above which the pause signal to
-        * class 1 of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
-              BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
-              reg_th_config->pause_xon);
-       /* The number of free blocks below which the full signal to
-        * class 1 of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
-              BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
-              reg_th_config->full_xoff);
-       /* The number of free blocks above which the full signal to
-        * class 1 of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
-              BRB1_REG_FULL_1_XON_THRESHOLD_0,
-              reg_th_config->full_xon);
-
-       if (CHIP_IS_E3B0(bp)) {
-               bnx2x_pfc_brb_get_e3b0_config_params(
-                       params,
-                       &e3b0_val,
-                       pfc_params,
-                       pfc_enabled);
-
-               REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
-                          e3b0_val.per_class_guaranty_mode);
-
-               /* The hysteresis on the guarantied buffer space for the Lb
-                * port before signaling XON.
-                */
-               REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
-                          e3b0_val.lb_guarantied_hyst);
-
-               /* The number of free blocks below which the full signal to the
-                * LB port is asserted.
-                */
-               REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
-                      e3b0_val.full_lb_xoff_th);
-               /* The number of free blocks above which the full signal to the
-                * LB port is de-asserted.
-                */
-               REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
-                      e3b0_val.full_lb_xon_threshold);
-               /* The number of blocks guarantied for the MAC #n port. n=0,1
-                */
-
-               /* The number of blocks guarantied for the LB port. */
-               REG_WR(bp, BRB1_REG_LB_GUARANTIED,
-                      e3b0_val.lb_guarantied);
-
-               /* The number of blocks guarantied for the MAC #n port. */
-               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
-                      2 * e3b0_val.mac_0_class_t_guarantied);
-               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
-                      2 * e3b0_val.mac_1_class_t_guarantied);
-               /* The number of blocks guarantied for class #t in MAC0. t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
-                      e3b0_val.mac_0_class_t_guarantied);
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
-                      e3b0_val.mac_0_class_t_guarantied);
-               /* The hysteresis on the guarantied buffer space for class in
-                * MAC0.  t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
-                      e3b0_val.mac_0_class_t_guarantied_hyst);
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
-                      e3b0_val.mac_0_class_t_guarantied_hyst);
-
-               /* The number of blocks guarantied for class #t in MAC1.t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
-                      e3b0_val.mac_1_class_t_guarantied);
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
-                      e3b0_val.mac_1_class_t_guarantied);
-               /* The hysteresis on the guarantied buffer space for class #t
-                * in MAC1.  t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
-                      e3b0_val.mac_1_class_t_guarantied_hyst);
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
-                      e3b0_val.mac_1_class_t_guarantied_hyst);
-       }
-
-       return bnx2x_status;
-}
-
 /******************************************************************************
 * Description:
 *  This function is needed because NIG ARB_CREDIT_WEIGHT_X are
@@ -2529,16 +2087,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
                        port_mb[params->port].link_status), link_status);
 }
 
-static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
-{
-       struct bnx2x *bp = params->bp;
-
-       if (bnx2x_eee_has_cap(params))
-               REG_WR(bp, params->shmem2_base +
-                      offsetof(struct shmem2_region,
-                               eee_status[params->port]), eee_status);
-}
-
 static void bnx2x_update_pfc_nig(struct link_params *params,
                struct link_vars *vars,
                struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2658,11 +2206,6 @@ int bnx2x_update_pfc(struct link_params *params,
        /* Update NIG params */
        bnx2x_update_pfc_nig(params, vars, pfc_params);
 
-       /* Update BRB params */
-       bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
-       if (bnx2x_status)
-               return bnx2x_status;
-
        if (!vars->link_up)
                return bnx2x_status;
 
@@ -2827,16 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
 
 static int bnx2x_bmac_enable(struct link_params *params,
                             struct link_vars *vars,
-                            u8 is_lb)
+                            u8 is_lb, u8 reset_bmac)
 {
        int rc = 0;
        u8 port = params->port;
        struct bnx2x *bp = params->bp;
        u32 val;
        /* Reset and unreset the BigMac */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-       usleep_range(1000, 2000);
+       if (reset_bmac) {
+               REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+                      (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+               usleep_range(1000, 2000);
+       }
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2868,37 +2413,28 @@ static int bnx2x_bmac_enable(struct link_params *params,
        return rc;
 }
 
-static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
 {
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
                        NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_data[2];
        u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 
+       if (CHIP_IS_E2(bp))
+               bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+       else
+               bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
        /* Only if the bmac is out of reset */
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
            nig_bmac_enable) {
-
-               if (CHIP_IS_E2(bp)) {
-                       /* Clear Rx Enable bit in BMAC_CONTROL register */
-                       REG_RD_DMAE(bp, bmac_addr +
-                                   BIGMAC2_REGISTER_BMAC_CONTROL,
-                                   wb_data, 2);
-                       wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR_DMAE(bp, bmac_addr +
-                                   BIGMAC2_REGISTER_BMAC_CONTROL,
-                                   wb_data, 2);
-               } else {
-                       /* Clear Rx Enable bit in BMAC_CONTROL register */
-                       REG_RD_DMAE(bp, bmac_addr +
-                                       BIGMAC_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
+               /* Set or clear the Rx Enable bit in BMAC_CONTROL register */
+               REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
+               if (en)
+                       wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
+               else
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR_DMAE(bp, bmac_addr +
-                                       BIGMAC_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
-               }
+               REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
                usleep_range(1000, 2000);
        }
 }
@@ -3233,6 +2769,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                               EMAC_MDIO_STATUS_10MB);
        return rc;
 }
+
+/******************************************************************/
+/*                     EEE section                                */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+       struct bnx2x *bp = params->bp;
+
+       if (REG_RD(bp, params->shmem2_base) <=
+                  offsetof(struct shmem2_region, eee_status[params->port]))
+               return 0;
+
+       return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+       switch (nvram_mode) {
+       case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+               *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+               break;
+       case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+               *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+               break;
+       case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+               *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+               break;
+       default:
+               *idle_timer = 0;
+               break;
+       }
+
+       return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+       switch (idle_timer) {
+       case EEE_MODE_NVRAM_BALANCED_TIME:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+               break;
+       case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+               break;
+       case EEE_MODE_NVRAM_LATENCY_TIME:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+               break;
+       default:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+               break;
+       }
+
+       return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+       u32 eee_mode, eee_idle;
+       struct bnx2x *bp = params->bp;
+
+       if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+               if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+                       /* time value in eee_mode --> used directly*/
+                       eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+               } else {
+                       /* hsi value in eee_mode --> time */
+                       if (bnx2x_eee_nvram_to_time(params->eee_mode &
+                                                   EEE_MODE_NVRAM_MASK,
+                                                   &eee_idle))
+                               return 0;
+               }
+       } else {
+               /* hsi values in nvram --> time*/
+               eee_mode = ((REG_RD(bp, params->shmem_base +
+                                   offsetof(struct shmem_region, dev_info.
+                                   port_feature_config[params->port].
+                                   eee_power_mode)) &
+                            PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+                           PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+               if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+                       return 0;
+       }
+
+       return eee_idle;
+}
+
+static int bnx2x_eee_set_timers(struct link_params *params,
+                                  struct link_vars *vars)
+{
+       u32 eee_idle = 0, eee_mode;
+       struct bnx2x *bp = params->bp;
+
+       eee_idle = bnx2x_eee_calc_timer(params);
+
+       if (eee_idle) {
+               REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+                      eee_idle);
+       } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+                  (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+                  (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+               DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+               return -EINVAL;
+       }
+
+       vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+       if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+               /* eee_idle in 1u --> eee_status in 16u */
+               eee_idle >>= 4;
+               vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+                                   SHMEM_EEE_TIME_OUTPUT_BIT;
+       } else {
+               if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+                       return -EINVAL;
+               vars->eee_status |= eee_mode;
+       }
+
+       return 0;
+}
+
+static int bnx2x_eee_initial_config(struct link_params *params,
+                                    struct link_vars *vars, u8 mode)
+{
+       vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+       /* Propagate params' bits --> vars (for migration exposure) */
+       if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+               vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+       else
+               vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+       if (params->eee_mode & EEE_MODE_ADV_LPI)
+               vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+       else
+               vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+       return bnx2x_eee_set_timers(params, vars);
+}
+
+static int bnx2x_eee_disable(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+
+       /* Make certain LPI is disabled */
+       REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+       return 0;
+}
+
+static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars, u8 modes)
+{
+       struct bnx2x *bp = params->bp;
+       u16 val = 0;
+
+       /* Mask events preventing LPI generation */
+       REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+       if (modes & SHMEM_EEE_10G_ADV) {
+               DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+               val |= 0x8;
+       }
+       if (modes & SHMEM_EEE_1G_ADV) {
+               DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
+               val |= 0x4;
+       }
+
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+       vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+       return 0;
+}
+
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+       struct bnx2x *bp = params->bp;
+
+       if (bnx2x_eee_has_cap(params))
+               REG_WR(bp, params->shmem2_base +
+                      offsetof(struct shmem2_region,
+                               eee_status[params->port]), eee_status);
+}
+
+static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       u16 adv = 0, lp = 0;
+       u32 lp_adv = 0;
+       u8 neg = 0;
+
+       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
+
+       if (lp & 0x2) {
+               lp_adv |= SHMEM_EEE_100M_ADV;
+               if (adv & 0x2) {
+                       if (vars->line_speed == SPEED_100)
+                               neg = 1;
+                       DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
+               }
+       }
+       if (lp & 0x14) {
+               lp_adv |= SHMEM_EEE_1G_ADV;
+               if (adv & 0x14) {
+                       if (vars->line_speed == SPEED_1000)
+                               neg = 1;
+                       DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
+               }
+       }
+       if (lp & 0x68) {
+               lp_adv |= SHMEM_EEE_10G_ADV;
+               if (adv & 0x68) {
+                       if (vars->line_speed == SPEED_10000)
+                               neg = 1;
+                       DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
+               }
+       }
+
+       vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+       vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+       if (neg) {
+               DP(NETIF_MSG_LINK, "EEE is active\n");
+               vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+       }
+
+}
+
 /******************************************************************/
 /*                     BSC access functions from E3              */
 /******************************************************************/
@@ -3754,6 +3529,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
  * init configuration, and set/clear SGMII flag. Internal
  * phy init is done purely in phy_init stage.
  */
+
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
+                                              struct link_params *params)
+{
+       struct bnx2x *bp = params->bp;
+
+       DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                                        struct link_params *params,
                                        struct link_vars *vars) {
@@ -4013,13 +3801,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
        bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
 
-       /* Enable LPI pass through */
-       DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_EEE_COMBO_CONTROL0,
-                        0x7c);
-       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
-                                MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+       bnx2x_warpcore_set_lpi_passthrough(phy, params);
 
        /* 10G XFI Full Duplex */
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4116,6 +3898,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
 
+       bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
        if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
                /* SGMII Autoneg */
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4409,7 +4193,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                           "serdes_net_if = 0x%x\n",
                       vars->line_speed, serdes_net_if);
        bnx2x_set_aer_mmd(params, phy);
-
+       bnx2x_warpcore_reset_lane(bp, phy, 1);
        vars->phy_flags |= PHY_XGXS_FLAG;
        if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
            (phy->req_line_speed &&
@@ -4718,6 +4502,10 @@ void bnx2x_link_status_update(struct link_params *params,
        vars->link_status = REG_RD(bp, params->shmem_base +
                                   offsetof(struct shmem_region,
                                            port_mb[port].link_status));
+       if (bnx2x_eee_has_cap(params))
+               vars->eee_status = REG_RD(bp, params->shmem2_base +
+                                         offsetof(struct shmem2_region,
+                                                  eee_status[params->port]));
 
        vars->phy_flags = PHY_XGXS_FLAG;
        bnx2x_sync_link(params, vars);
@@ -6530,25 +6318,21 @@ static int bnx2x_update_link_down(struct link_params *params,
        usleep_range(10000, 20000);
        /* Reset BigMac/Xmac */
        if (CHIP_IS_E1x(bp) ||
-           CHIP_IS_E2(bp)) {
-               bnx2x_bmac_rx_disable(bp, params->port);
-               REG_WR(bp, GRCBASE_MISC +
-                      MISC_REGISTERS_RESET_REG_2_CLEAR,
-              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-       }
+           CHIP_IS_E2(bp))
+               bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
        if (CHIP_IS_E3(bp)) {
                /* Prevent LPI Generation by chip */
                REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
                       0);
-               REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
                REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
                       0);
                vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
                                      SHMEM_EEE_ACTIVE_BIT);
 
                bnx2x_update_mng_eee(params, vars->eee_status);
-               bnx2x_xmac_disable(params);
-               bnx2x_umac_disable(params);
+               bnx2x_set_xmac_rxtx(params, 0);
+               bnx2x_set_umac_rxtx(params, 0);
        }
 
        return 0;
@@ -6600,7 +6384,7 @@ static int bnx2x_update_link_up(struct link_params *params,
        if ((CHIP_IS_E1x(bp) ||
             CHIP_IS_E2(bp))) {
                if (link_10g) {
-                       if (bnx2x_bmac_enable(params, vars, 0) ==
+                       if (bnx2x_bmac_enable(params, vars, 0, 1) ==
                            -ESRCH) {
                                DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
                                vars->link_up = 0;
@@ -7207,6 +6991,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
        msleep(500);
 }
 
+static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+                                    struct link_params *params,
+                                    u32 action)
+{
+       struct bnx2x *bp = params->bp;
+       switch (action) {
+       case PHY_INIT:
+               /* Enable LASI */
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
+               break;
+       }
+}
+
 static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
                                  struct link_params *params,
                                  struct link_vars *vars)
@@ -7227,12 +7027,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
-       /* Enable LASI */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
-
+       bnx2x_8073_specific_func(phy, params, PHY_INIT);
        bnx2x_8073_set_pause_cl37(params, phy, vars);
 
        bnx2x_cl45_read(bp, phy,
@@ -8267,7 +8062,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
                                     u32 action)
 {
        struct bnx2x *bp = params->bp;
-
+       u16 val;
        switch (action) {
        case DISABLE_TX:
                bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -8276,6 +8071,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
                if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
                        bnx2x_sfp_set_transmitter(params, phy, 1);
                break;
+       case PHY_INIT:
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+                                (1<<2) | (1<<5));
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+                                0);
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+               /* Make MOD_ABS give interrupt on change */
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+                               &val);
+               val |= (1<<12);
+               if (phy->flags & FLAGS_NOC)
+                       val |= (3<<5);
+               /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+                * status which reflect SFP+ module over-current
+                */
+               if (!(phy->flags & FLAGS_NOC))
+                       val &= 0xff8f; /* Reset bits 4-6 */
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+                                val);
+
+               /* Set 2-wire transfer rate of SFP+ module EEPROM
+                * to 100Khz since some DACs(direct attached cables) do
+                * not work at 400Khz.
+                */
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+                                0xa001);
+               break;
        default:
                DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
                   action);
@@ -9058,28 +8887,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                  struct link_vars *vars)
 {
        u32 tx_en_mode;
-       u16 tmp1, val, mod_abs, tmp2;
-       u16 rx_alarm_ctrl_val;
-       u16 lasi_ctrl_val;
+       u16 tmp1, mod_abs, tmp2;
        struct bnx2x *bp = params->bp;
        /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
        bnx2x_wait_reset_complete(bp, phy, params);
-       rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
-       /* Should be 0x6 to enable XS on Tx side. */
-       lasi_ctrl_val = 0x0006;
 
        DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
-       /* Enable LASI */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
-                        rx_alarm_ctrl_val);
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
-                        0);
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 
+       bnx2x_8727_specific_func(phy, params, PHY_INIT);
        /* Initially configure MOD_ABS to interrupt when module is
         * presence( bit 8)
         */
@@ -9095,25 +8911,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-
        /* Enable/Disable PHY transmitter output */
        bnx2x_set_disable_pmd_transmit(params, phy, 0);
 
-       /* Make MOD_ABS give interrupt on change */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-                       &val);
-       val |= (1<<12);
-       if (phy->flags & FLAGS_NOC)
-               val |= (3<<5);
-
-       /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
-        * status which reflect SFP+ module over-current
-        */
-       if (!(phy->flags & FLAGS_NOC))
-               val &= 0xff8f; /* Reset bits 4-6 */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
-
        bnx2x_8727_power_module(bp, phy, 1);
 
        bnx2x_cl45_read(bp, phy,
@@ -9123,13 +8923,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
 
        bnx2x_8727_config_speed(phy, params);
-       /* Set 2-wire transfer rate of SFP+ module EEPROM
-        * to 100Khz since some DACs(direct attached cables) do
-        * not work at 400Khz.
-        */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
-                        0xa001);
+
 
        /* Set TX PreEmphasis if needed */
        if ((params->feature_config_flags &
@@ -9558,6 +9352,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                         0xFFFB, 0xFFFD);
 }
 
+static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
+                                     struct link_params *params,
+                                     u32 action)
+{
+       struct bnx2x *bp = params->bp;
+       switch (action) {
+       case PHY_INIT:
+               if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+                       /* Save spirom version */
+                       bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+               }
+               /* This phy uses the NIG latch mechanism since link indication
+                * arrives through its LED4 and not via its LASI signal, so we
+                * get steady signal instead of clear on read
+                */
+               bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+                             1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+               bnx2x_848xx_set_led(bp, phy);
+               break;
+       }
+}
+
 static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                       struct link_params *params,
                                       struct link_vars *vars)
@@ -9565,22 +9382,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
 
-       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-               /* Save spirom version */
-               bnx2x_save_848xx_spirom_version(phy, bp, params->port);
-       }
-       /* This phy uses the NIG latch mechanism since link indication
-        * arrives through its LED4 and not via its LASI signal, so we
-        * get steady signal instead of clear on read
-        */
-       bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
-                     1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
+       bnx2x_848xx_specific_func(phy, params, PHY_INIT);
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
 
-       bnx2x_848xx_set_led(bp, phy);
-
        /* set 1000 speed advertisement */
        bnx2x_cl45_read(bp, phy,
                        MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
@@ -9887,39 +9692,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
        return 0;
 }
 
-static int bnx2x_8483x_eee_timers(struct link_params *params,
-                                  struct link_vars *vars)
-{
-       u32 eee_idle = 0, eee_mode;
-       struct bnx2x *bp = params->bp;
-
-       eee_idle = bnx2x_eee_calc_timer(params);
-
-       if (eee_idle) {
-               REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
-                      eee_idle);
-       } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
-                  (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
-                  (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
-               DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
-               return -EINVAL;
-       }
-
-       vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
-       if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
-               /* eee_idle in 1u --> eee_status in 16u */
-               eee_idle >>= 4;
-               vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
-                                   SHMEM_EEE_TIME_OUTPUT_BIT;
-       } else {
-               if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
-                       return -EINVAL;
-               vars->eee_status |= eee_mode;
-       }
-
-       return 0;
-}
-
 static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
                                   struct link_params *params,
                                   struct link_vars *vars)
@@ -9930,10 +9702,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
 
-       /* Make Certain LPI is disabled */
-       REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
-       REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
-
        /* Prevent Phy from working in EEE and advertising it */
        rc = bnx2x_84833_cmd_hdlr(phy, params,
                PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
@@ -9942,10 +9710,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
                return rc;
        }
 
-       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
-       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
-
-       return 0;
+       return bnx2x_eee_disable(phy, params, vars);
 }
 
 static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
@@ -9956,8 +9721,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 cmd_args = 1;
 
-       DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
-
        rc = bnx2x_84833_cmd_hdlr(phy, params,
                PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
        if (rc) {
@@ -9965,15 +9728,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
                return rc;
        }
 
-       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
-
-       /* Mask events preventing LPI generation */
-       REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
-
-       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
-       vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
-
-       return 0;
+       return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
 }
 
 #define PHY84833_CONSTANT_LATENCY 1193
@@ -10105,22 +9860,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                        MDIO_84833_TOP_CFG_FW_REV, &val);
 
        /* Configure EEE support */
-       if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
-               phy->flags |= FLAGS_EEE_10GBT;
-               vars->eee_status |= SHMEM_EEE_10G_ADV <<
-                                   SHMEM_EEE_SUPPORTED_SHIFT;
-               /* Propogate params' bits --> vars (for migration exposure) */
-               if (params->eee_mode & EEE_MODE_ENABLE_LPI)
-                       vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
-               else
-                       vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
-
-               if (params->eee_mode & EEE_MODE_ADV_LPI)
-                       vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
-               else
-                       vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
-
-               rc = bnx2x_8483x_eee_timers(params, vars);
+       if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+           (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+           bnx2x_eee_has_cap(params)) {
+               rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
                if (rc) {
                        DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
                        bnx2x_8483x_disable_eee(phy, params, vars);
@@ -10139,7 +9882,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                        return rc;
                }
        } else {
-               phy->flags &= ~FLAGS_EEE_10GBT;
                vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
        }
 
@@ -10278,29 +10020,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
                                LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
                /* Determine if EEE was negotiated */
-               if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-                       u32 eee_shmem = 0;
-
-                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
-                                       MDIO_AN_REG_EEE_ADV, &val1);
-                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
-                                       MDIO_AN_REG_LP_EEE_ADV, &val2);
-                       if ((val1 & val2) & 0x8) {
-                               DP(NETIF_MSG_LINK, "EEE negotiated\n");
-                               vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
-                       }
-
-                       if (val2 & 0x12)
-                               eee_shmem |= SHMEM_EEE_100M_ADV;
-                       if (val2 & 0x4)
-                               eee_shmem |= SHMEM_EEE_1G_ADV;
-                       if (val2 & 0x68)
-                               eee_shmem |= SHMEM_EEE_10G_ADV;
-
-                       vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
-                       vars->eee_status |= (eee_shmem <<
-                                            SHMEM_EEE_LP_ADV_STATUS_SHIFT);
-               }
+               if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+                       bnx2x_eee_an_resolve(phy, params, vars);
        }
 
        return link_up;
@@ -10569,6 +10290,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 /******************************************************************/
 /*                     54618SE PHY SECTION                       */
 /******************************************************************/
+static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+                                       struct link_params *params,
+                                       u32 action)
+{
+       struct bnx2x *bp = params->bp;
+       u16 temp;
+       switch (action) {
+       case PHY_INIT:
+               /* Configure LED4: set to INTR (0x6). */
+               /* Accessing shadow register 0xe. */
+               bnx2x_cl22_write(bp, phy,
+                                MDIO_REG_GPHY_SHADOW,
+                                MDIO_REG_GPHY_SHADOW_LED_SEL2);
+               bnx2x_cl22_read(bp, phy,
+                               MDIO_REG_GPHY_SHADOW,
+                               &temp);
+               temp &= ~(0xf << 4);
+               temp |= (0x6 << 4);
+               bnx2x_cl22_write(bp, phy,
+                                MDIO_REG_GPHY_SHADOW,
+                                MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+               /* Configure INTR based on link status change. */
+               bnx2x_cl22_write(bp, phy,
+                                MDIO_REG_INTR_MASK,
+                                ~MDIO_REG_INTR_MASK_LINK_STATUS);
+               break;
+       }
+}
+
 static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                                               struct link_params *params,
                                               struct link_vars *vars)
@@ -10606,24 +10356,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        /* Wait for GPHY to reset */
        msleep(50);
 
-       /* Configure LED4: set to INTR (0x6). */
-       /* Accessing shadow register 0xe. */
-       bnx2x_cl22_write(bp, phy,
-                       MDIO_REG_GPHY_SHADOW,
-                       MDIO_REG_GPHY_SHADOW_LED_SEL2);
-       bnx2x_cl22_read(bp, phy,
-                       MDIO_REG_GPHY_SHADOW,
-                       &temp);
-       temp &= ~(0xf << 4);
-       temp |= (0x6 << 4);
-       bnx2x_cl22_write(bp, phy,
-                       MDIO_REG_GPHY_SHADOW,
-                       MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
-       /* Configure INTR based on link status change. */
-       bnx2x_cl22_write(bp, phy,
-                       MDIO_REG_INTR_MASK,
-                       ~MDIO_REG_INTR_MASK_LINK_STATUS);
 
+       bnx2x_54618se_specific_func(phy, params, PHY_INIT);
        /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
        bnx2x_cl22_write(bp, phy,
                        MDIO_REG_GPHY_SHADOW,
@@ -10728,28 +10462,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "Setting 10M force\n");
        }
 
-       /* Check if we should turn on Auto-GrEEEn */
-       bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
-       if (temp == MDIO_REG_GPHY_ID_54618SE) {
-               if (params->feature_config_flags &
-                   FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
-                       temp = 6;
-                       DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+       if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
+               int rc;
+
+               bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
+                                MDIO_REG_GPHY_EXP_ACCESS_TOP |
+                                MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+               bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+               temp &= 0xfffe;
+               bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+               rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+               if (rc) {
+                       DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+                       bnx2x_eee_disable(phy, params, vars);
+               } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
+                          (phy->req_duplex == DUPLEX_FULL) &&
+                          (bnx2x_eee_calc_timer(params) ||
+                           !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
+                       /* Need to advertise EEE only when requested,
+                        * and either no LPI assertion was requested,
+                        * or it was requested and a valid timer was set.
+                        * Also notice full duplex is required for EEE.
+                        */
+                       bnx2x_eee_advertise(phy, params, vars,
+                                           SHMEM_EEE_1G_ADV);
                } else {
-                       temp = 0;
-                       DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+                       DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
+                       bnx2x_eee_disable(phy, params, vars);
+               }
+       } else {
+               vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
+                                   SHMEM_EEE_SUPPORTED_SHIFT;
+
+               if (phy->flags & FLAGS_EEE) {
+                       /* Handle legacy auto-grEEEn */
+                       if (params->feature_config_flags &
+                           FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+                               temp = 6;
+                               DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+                       } else {
+                               temp = 0;
+                               DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
+                       }
+                       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                                        MDIO_AN_REG_EEE_ADV, temp);
                }
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_DATA_REG,
-                                MDIO_REG_GPHY_EEE_ADV);
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_ADDR_REG,
-                                (0x1 << 14) | MDIO_AN_DEVAD);
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_DATA_REG,
-                                temp);
        }
 
        bnx2x_cl22_write(bp, phy,
@@ -10896,29 +10654,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
                           vars->line_speed);
 
-               /* Report whether EEE is resolved. */
-               bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
-               if (val == MDIO_REG_GPHY_ID_54618SE) {
-                       if (vars->link_status &
-                           LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
-                               val = 0;
-                       else {
-                               bnx2x_cl22_write(bp, phy,
-                                       MDIO_REG_GPHY_CL45_ADDR_REG,
-                                       MDIO_AN_DEVAD);
-                               bnx2x_cl22_write(bp, phy,
-                                       MDIO_REG_GPHY_CL45_DATA_REG,
-                                       MDIO_REG_GPHY_EEE_RESOLVED);
-                               bnx2x_cl22_write(bp, phy,
-                                       MDIO_REG_GPHY_CL45_ADDR_REG,
-                                       (0x1 << 14) | MDIO_AN_DEVAD);
-                               bnx2x_cl22_read(bp, phy,
-                                       MDIO_REG_GPHY_CL45_DATA_REG,
-                                       &val);
-                       }
-                       DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
-               }
-
                bnx2x_ext_phy_resolve_fc(phy, params, vars);
 
                if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
@@ -10948,6 +10683,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
                        if (val & (1<<11))
                                vars->link_status |=
                                  LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+                       if ((phy->flags & FLAGS_EEE) &&
+                           bnx2x_eee_has_cap(params))
+                               bnx2x_eee_an_resolve(phy, params, vars);
                }
        }
        return link_up;
@@ -11353,7 +11092,7 @@ static struct bnx2x_phy phy_8073 = {
        .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
        .hw_reset       = (hw_reset_t)NULL,
        .set_link_led   = (set_link_led_t)NULL,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
 };
 static struct bnx2x_phy phy_8705 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11546,7 +11285,7 @@ static struct bnx2x_phy phy_84823 = {
        .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
        .hw_reset       = (hw_reset_t)NULL,
        .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
 static struct bnx2x_phy phy_84833 = {
@@ -11555,8 +11294,7 @@ static struct bnx2x_phy phy_84833 = {
        .def_md_devad   = 0,
        .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
                           FLAGS_REARM_LATCH_SIGNAL |
-                          FLAGS_TX_ERROR_CHECK |
-                          FLAGS_EEE_10GBT),
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11582,7 +11320,7 @@ static struct bnx2x_phy phy_84833 = {
        .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
        .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
        .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
 static struct bnx2x_phy phy_54618se = {
@@ -11616,7 +11354,7 @@ static struct bnx2x_phy phy_54618se = {
        .format_fw_ver  = (format_fw_ver_t)NULL,
        .hw_reset       = (hw_reset_t)NULL,
        .set_link_led   = (set_link_led_t)bnx2x_5461x_set_link_led,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
 };
 /*****************************************************************/
 /*                                                               */
@@ -11862,6 +11600,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
                *phy = phy_54618se;
+               if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+                       phy->flags |= FLAGS_EEE;
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                *phy = phy_7101;
@@ -12141,7 +11881,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
                bnx2x_xgxs_deassert(params);
 
                /* set bmac loopback */
-               bnx2x_bmac_enable(params, vars, 1);
+               bnx2x_bmac_enable(params, vars, 1, 1);
 
                REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
@@ -12233,7 +11973,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
                if (USES_WARPCORE(bp))
                        bnx2x_xmac_enable(params, vars, 0);
                else
-                       bnx2x_bmac_enable(params, vars, 0);
+                       bnx2x_bmac_enable(params, vars, 0, 1);
        }
 
                if (params->loopback_mode == LOOPBACK_XGXS) {
@@ -12258,8 +11998,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
        bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 }
 
+static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+{
+       struct bnx2x *bp = params->bp;
+       u8 val = en * 0x1F;
+
+       /* Open the gate between the NIG to the BRB */
+       if (!CHIP_IS_E1x(bp))
+               val |= en * 0x20;
+       REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
+
+       if (!CHIP_IS_E1(bp)) {
+               REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
+                      en*0x3);
+       }
+
+       REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+                   NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
+static int bnx2x_avoid_link_flap(struct link_params *params,
+                                           struct link_vars *vars)
+{
+       u32 phy_idx;
+       u32 dont_clear_stat, lfa_sts;
+       struct bnx2x *bp = params->bp;
+
+       /* Sync the link parameters */
+       bnx2x_link_status_update(params, vars);
+
+       /*
+        * The module verification was already done by previous link owner,
+        * so this call is meant only to get warning message
+        */
+
+       for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+               struct bnx2x_phy *phy = &params->phy[phy_idx];
+               if (phy->phy_specific_func) {
+                       DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
+                       phy->phy_specific_func(phy, params, PHY_INIT);
+               }
+               if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
+                   (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
+                   (phy->media_type == ETH_PHY_DA_TWINAX))
+                       bnx2x_verify_sfp_module(phy, params);
+       }
+       lfa_sts = REG_RD(bp, params->lfa_base +
+                        offsetof(struct shmem_lfa,
+                                 lfa_sts));
+
+       dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+       /* Re-enable the NIG/MAC */
+       if (CHIP_IS_E3(bp)) {
+               if (!dont_clear_stat) {
+                       REG_WR(bp, GRCBASE_MISC +
+                              MISC_REGISTERS_RESET_REG_2_CLEAR,
+                              (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+                               params->port));
+                       REG_WR(bp, GRCBASE_MISC +
+                              MISC_REGISTERS_RESET_REG_2_SET,
+                              (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+                               params->port));
+               }
+               if (vars->line_speed < SPEED_10000)
+                       bnx2x_umac_enable(params, vars, 0);
+               else
+                       bnx2x_xmac_enable(params, vars, 0);
+       } else {
+               if (vars->line_speed < SPEED_10000)
+                       bnx2x_emac_enable(params, vars, 0);
+               else
+                       bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
+       }
+
+       /* Increment LFA count */
+       lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+                  (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+                      LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+                   << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+       /* Clear link flap reason */
+       lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+       /* Disable NIG DRAIN */
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+       /* Enable interrupts */
+       bnx2x_link_int_enable(params);
+       return 0;
+}
+
+static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
+                                        struct link_vars *vars,
+                                        int lfa_status)
+{
+       u32 lfa_sts, cfg_idx, tmp_val;
+       struct bnx2x *bp = params->bp;
+
+       bnx2x_link_reset(params, vars, 1);
+
+       if (!params->lfa_base)
+               return;
+       /* Store the new link parameters */
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, req_duplex),
+              params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, req_flow_ctrl),
+              params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, req_line_speed),
+              params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+       for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+               REG_WR(bp, params->lfa_base +
+                      offsetof(struct shmem_lfa,
+                               speed_cap_mask[cfg_idx]),
+                      params->speed_cap_mask[cfg_idx]);
+       }
+
+       tmp_val = REG_RD(bp, params->lfa_base +
+                        offsetof(struct shmem_lfa, additional_config));
+       tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+       tmp_val |= params->req_fc_auto_adv;
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+       lfa_sts = REG_RD(bp, params->lfa_base +
+                        offsetof(struct shmem_lfa, lfa_sts));
+
+       /* Clear the "Don't Clear Statistics" bit, and set reason */
+       lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+       /* Set link flap reason */
+       lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+       lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+                   LFA_LINK_FLAP_REASON_OFFSET);
+
+       /* Increment link flap counter */
+       lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+                  (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+                      LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+                   << LINK_FLAP_COUNT_OFFSET));
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+       /* Proceed with regular link initialization */
+}
+
 int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 {
+       int lfa_status;
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Phy Initialization started\n");
        DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
@@ -12274,6 +12167,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
        vars->mac_type = MAC_TYPE_NONE;
        vars->phy_flags = 0;
+       /* Driver opens NIG-BRB filters */
+       bnx2x_set_rx_filter(params, 1);
+       /* Check if link flap can be avoided */
+       lfa_status = bnx2x_check_lfa(params);
+
+       if (lfa_status == 0) {
+               DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
+               return bnx2x_avoid_link_flap(params, vars);
+       }
+
+       DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
+                      lfa_status);
+       bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
 
        /* Disable attentions */
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -12356,13 +12262,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
                REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
        }
 
-       /* Stop BigMac rx */
-       if (!CHIP_IS_E3(bp))
-               bnx2x_bmac_rx_disable(bp, port);
-       else {
-               bnx2x_xmac_disable(params);
-               bnx2x_umac_disable(params);
-       }
+               if (!CHIP_IS_E3(bp)) {
+                       bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+               } else {
+                       bnx2x_set_xmac_rxtx(params, 0);
+                       bnx2x_set_umac_rxtx(params, 0);
+               }
        /* Disable emac */
        if (!CHIP_IS_E3(bp))
                REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -12420,6 +12325,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
        vars->phy_flags = 0;
        return 0;
 }
+int bnx2x_lfa_reset(struct link_params *params,
+                              struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       vars->link_up = 0;
+       vars->phy_flags = 0;
+       if (!params->lfa_base)
+               return bnx2x_link_reset(params, vars, 1);
+       /*
+        * Activate NIG drain so that during this time the device won't send
+        * anything while it is unable to response.
+        */
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+
+       /*
+        * Close gracefully the gate from BMAC to NIG such that no half packets
+        * are passed.
+        */
+       if (!CHIP_IS_E3(bp))
+               bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+       if (CHIP_IS_E3(bp)) {
+               bnx2x_set_xmac_rxtx(params, 0);
+               bnx2x_set_umac_rxtx(params, 0);
+       }
+       /* Wait 10ms for the pipe to clean up*/
+       usleep_range(10000, 20000);
+
+       /* Clean the NIG-BRB using the network filters in a way that will
+        * not cut a packet in the middle.
+        */
+       bnx2x_set_rx_filter(params, 0);
+
+       /*
+        * Re-open the gate between the BMAC and the NIG, after verifying the
+        * gate to the BRB is closed, otherwise packets may arrive to the
+        * firmware before driver had initialized it. The target is to achieve
+        * minimum management protocol down time.
+        */
+       if (!CHIP_IS_E3(bp))
+               bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
+
+       if (CHIP_IS_E3(bp)) {
+               bnx2x_set_xmac_rxtx(params, 1);
+               bnx2x_set_umac_rxtx(params, 1);
+       }
+       /* Disable NIG drain */
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+       return 0;
+}
 
 /****************************************************************************/
 /*                             Common function                             */
index 51cac8130051e27a0994c288712346698280d740..9165b89a4b1923d6948bcefba018f2d6cc707633 100644 (file)
@@ -155,7 +155,7 @@ struct bnx2x_phy {
 #define FLAGS_DUMMY_READ               (1<<9)
 #define FLAGS_MDC_MDIO_WA_B0           (1<<10)
 #define FLAGS_TX_ERROR_CHECK           (1<<12)
-#define FLAGS_EEE_10GBT                        (1<<13)
+#define FLAGS_EEE                      (1<<13)
 
        /* preemphasis values for the rx side */
        u16 rx_preemphasis[4];
@@ -216,6 +216,7 @@ struct bnx2x_phy {
        phy_specific_func_t phy_specific_func;
 #define DISABLE_TX     1
 #define ENABLE_TX      2
+#define PHY_INIT       3
 };
 
 /* Inputs parameters to the CLC */
@@ -304,6 +305,8 @@ struct link_params {
        struct bnx2x *bp;
        u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
                                req_flow_ctrl is set to AUTO */
+       u16 rsrv1;
+       u32 lfa_base;
 };
 
 /* Output parameters */
@@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
    to 0 */
 int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
                     u8 reset_ext_phy);
-
+int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
 /* bnx2x_link_update should be called upon link interrupt */
 int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
 
index e11485ca037dc223fd6f9e7420387c16c4251a50..f7ed122f40717ee4392041e09e1b2396e640370b 100644 (file)
@@ -2166,7 +2166,6 @@ void bnx2x_link_set(struct bnx2x *bp)
 {
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
-               bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
 
@@ -2179,12 +2178,19 @@ static void bnx2x__link_reset(struct bnx2x *bp)
 {
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
-               bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+               bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not reset link\n");
 }
 
+void bnx2x_force_link_reset(struct bnx2x *bp)
+{
+       bnx2x_acquire_phy_lock(bp);
+       bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+       bnx2x_release_phy_lock(bp);
+}
+
 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
 {
        u8 rc = 0;
@@ -6751,7 +6757,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        u32 low, high;
        u32 val;
 
-       bnx2x__link_reset(bp);
 
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
@@ -8244,12 +8249,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
  *
  * @bp:                driver handle
+ * @keep_link:         true iff link should be kept up
  */
-void bnx2x_send_unload_done(struct bnx2x *bp)
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
 {
+       u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
        /* Report UNLOAD_DONE to MCP */
        if (!BP_NOMCP(bp))
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
 }
 
 static int bnx2x_func_wait_started(struct bnx2x *bp)
@@ -8318,7 +8326,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
        return 0;
 }
 
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 {
        int port = BP_PORT(bp);
        int i, rc = 0;
@@ -8440,7 +8448,7 @@ unload_error:
 
 
        /* Report UNLOAD_DONE to MCP */
-       bnx2x_send_unload_done(bp);
+       bnx2x_send_unload_done(bp, keep_link);
 }
 
 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -8852,7 +8860,8 @@ int bnx2x_leader_reset(struct bnx2x *bp)
         * driver is owner of the HW
         */
        if (!global && !BP_NOMCP(bp)) {
-               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+                                            DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EAGAIN;
@@ -8958,7 +8967,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 
                        /* Stop the driver */
                        /* If interface has been removed - break */
-                       if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+                       if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
                                return;
 
                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
@@ -9124,7 +9133,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                bnx2x_nic_load(bp, LOAD_NORMAL);
 
                goto sp_rtnl_exit;
@@ -9310,7 +9319,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
 
 static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
 {
-       u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+       u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
+                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
        if (!rc) {
                BNX2X_ERR("MCP response failure, aborting\n");
                return -EBUSY;
@@ -11000,7 +11010,7 @@ static int bnx2x_close(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
 
        /* Unload the driver, release IRQs */
-       bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+       bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 
        /* Power off */
        bnx2x_set_power_state(bp, PCI_D3hot);
index 28a0bcfe61ff9a4224d5b1f3442400a363bce79b..1b1999d34c7180f41649b606695ecccfbcb237c5 100644 (file)
 #define UMAC_COMMAND_CONFIG_REG_SW_RESET                        (0x1<<13)
 #define UMAC_COMMAND_CONFIG_REG_TX_ENA                          (0x1<<0)
 #define UMAC_REG_COMMAND_CONFIG                                         0x8
+/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE
+ * state from LPI state when it receives packet for transmission. The
+ * decrement unit is 1 micro-second. */
+#define UMAC_REG_EEE_WAKE_TIMER                                         0x6c
 /* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
  * to bit 17 of the MAC address etc. */
 #define UMAC_REG_MAC_ADDR0                                      0xc
 /* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
  * logic to check frames. */
 #define UMAC_REG_MAXFR                                          0x14
+#define UMAC_REG_UMAC_EEE_CTRL                                  0x64
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN                           (0x1<<3)
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define USDM_REG_AGG_INT_EVENT_0                                0xc4038
 #define USDM_REG_AGG_INT_EVENT_1                                0xc403c
@@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/
 /* BCM84833 only */
 #define MDIO_84833_TOP_CFG_FW_REV                      0x400f
 #define MDIO_84833_TOP_CFG_FW_EEE              0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE           0x1f81
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1                        0x401a
 #define MDIO_84833_SUPER_ISOLATE               0x8000
 /* These are mailbox register set used by 84833. */
@@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_REG_GPHY_ID_54618SE               0x5cd5
 #define MDIO_REG_GPHY_CL45_ADDR_REG                    0xd
 #define MDIO_REG_GPHY_CL45_DATA_REG                    0xe
-#define MDIO_REG_GPHY_EEE_ADV                  0x3c
-#define MDIO_REG_GPHY_EEE_1G           (0x1 << 2)
-#define MDIO_REG_GPHY_EEE_100          (0x1 << 1)
 #define MDIO_REG_GPHY_EEE_RESOLVED             0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE                  0x15
+#define MDIO_REG_GPHY_EXP_ACCESS                       0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP           0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF           0x40
 #define MDIO_REG_GPHY_AUX_STATUS                       0x19
 #define MDIO_REG_INTR_STATUS                           0x1a
 #define MDIO_REG_INTR_MASK                             0x1b
index 62f754bd0dfe65704a1af826a754a720dc8cfd43..71971a161bd199746595d501691f19300c5ff2ad 100644 (file)
@@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                         */
                        list_add_tail(&spacer.link, &o->pending_comp);
                        mb();
-                       list_del(&elem->link);
-                       list_add_tail(&elem->link, &o->pending_comp);
+                       list_move_tail(&elem->link, &o->pending_comp);
                        list_del(&spacer.link);
                } else
                        break;
@@ -5620,7 +5619,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
        memset(rdata, 0, sizeof(*rdata));
 
        /* Fill the ramrod data with provided parameters */
-       rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+       rdata->function_mode = (u8)start_params->mf_mode;
        rdata->sd_vlan_tag   = cpu_to_le16(start_params->sd_vlan_tag);
        rdata->path_id       = BP_PATH(bp);
        rdata->network_cos_mode = start_params->network_cos_mode;
index a1d0446b39b356dd69e0b77e69f6285ba37edb63..348ed02d3c69928c3991d7c8de046ccf95c0a3c9 100644 (file)
@@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref)
 #endif
 }
 
-static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
 {
-       u16 res = sizeof(struct host_port_stats) >> 2;
+       u16 res = 0;
 
-       /* if PFC stats are not supported by the MFW, don't DMA them */
-       if (!(bp->flags &  BC_SUPPORTS_PFC_STATS))
-               res -= (sizeof(u32)*4) >> 2;
+       /* 'newest' convention - shmem2 cotains the size of the port stats */
+       if (SHMEM2_HAS(bp, sizeof_port_stats)) {
+               u32 size = SHMEM2_RD(bp, sizeof_port_stats);
+               if (size)
+                       res = size;
 
+               /* prevent newer BC from causing buffer overflow */
+               if (res > sizeof(struct host_port_stats))
+                       res = sizeof(struct host_port_stats);
+       }
+
+       /* Older convention - all BCs support the port stats' fields up until
+        * the 'not_used' field
+        */
+       if (!res) {
+               res = offsetof(struct host_port_stats, not_used) + 4;
+
+               /* if PFC stats are supported by the MFW, DMA them as well */
+               if (bp->flags & BC_SUPPORTS_PFC_STATS) {
+                       res += offsetof(struct host_port_stats,
+                                       pfc_frames_rx_lo) -
+                              offsetof(struct host_port_stats,
+                                       pfc_frames_tx_hi) + 4 ;
+               }
+       }
+
+       res >>= 2;
+
+       WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
        return res;
 }
 
index 3b4fc61f24cfe1cb047dbbd16e86d7180a102046..cc8434fd606e2a089e20708f4ee12794fdfdd1e1 100644 (file)
@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
        }
 }
 
-static void __cnic_free_uio(struct cnic_uio_dev *udev)
+static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
 {
-       uio_unregister_device(&udev->cnic_uinfo);
-
        if (udev->l2_buf) {
                dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
                                  udev->l2_buf, udev->l2_buf_map);
@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
                udev->l2_ring = NULL;
        }
 
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+       uio_unregister_device(&udev->cnic_uinfo);
+
+       __cnic_free_uio_rings(udev);
+
        pci_dev_put(udev->pdev);
        kfree(udev);
 }
@@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev)
        if (udev) {
                udev->dev = NULL;
                cp->udev = NULL;
+               if (udev->uio_dev == -1)
+                       __cnic_free_uio_rings(udev);
        }
 
        cnic_free_context(dev);
@@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
        return 0;
 }
 
+static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
+{
+       struct cnic_local *cp = udev->dev->cnic_priv;
+
+       if (udev->l2_ring)
+               return 0;
+
+       udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+       udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+                                          &udev->l2_ring_map,
+                                          GFP_KERNEL | __GFP_COMP);
+       if (!udev->l2_ring)
+               return -ENOMEM;
+
+       udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+       udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+       udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+                                         &udev->l2_buf_map,
+                                         GFP_KERNEL | __GFP_COMP);
+       if (!udev->l2_buf) {
+               __cnic_free_uio_rings(udev);
+               return -ENOMEM;
+       }
+
+       return 0;
+
+}
+
 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
        list_for_each_entry(udev, &cnic_udev_list, list) {
                if (udev->pdev == dev->pcidev) {
                        udev->dev = dev;
+                       if (__cnic_alloc_uio_rings(udev, pages)) {
+                               udev->dev = NULL;
+                               read_unlock(&cnic_dev_lock);
+                               return -ENOMEM;
+                       }
                        cp->udev = udev;
                        read_unlock(&cnic_dev_lock);
                        return 0;
@@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 
        udev->dev = dev;
        udev->pdev = dev->pcidev;
-       udev->l2_ring_size = pages * BCM_PAGE_SIZE;
-       udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
-                                          &udev->l2_ring_map,
-                                          GFP_KERNEL | __GFP_COMP);
-       if (!udev->l2_ring)
-               goto err_udev;
 
-       udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-       udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
-       udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
-                                         &udev->l2_buf_map,
-                                         GFP_KERNEL | __GFP_COMP);
-       if (!udev->l2_buf)
-               goto err_dma;
+       if (__cnic_alloc_uio_rings(udev, pages))
+               goto err_udev;
 
        write_lock(&cnic_dev_lock);
        list_add(&udev->list, &cnic_udev_list);
@@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
        cp->udev = udev;
 
        return 0;
- err_dma:
-       dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
-                         udev->l2_ring, udev->l2_ring_map);
+
  err_udev:
        kfree(udev);
        return -ENOMEM;
@@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
        if (ret)
                goto error;
 
-       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+       if (CNIC_SUPPORTS_FCOE(cp)) {
                ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
                if (ret)
                        goto error;
@@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
        if (ret)
                goto error;
 
+       if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+               return 0;
+
        cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
 
        cp->l2_rx_ring_size = 15;
@@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
                        IGU_INT_DISABLE, 0);
 }
 
+static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
+                          IGU_INT_ENABLE, 1);
+}
+
+static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
+                       IGU_INT_ENABLE, 1);
+}
+
 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
 {
        u32 last_status = *info->status_idx_ptr;
@@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data)
                CNIC_WR16(dev, cp->kcq1.io_addr,
                          cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-               if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
-                       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
-                                          status_idx, IGU_INT_ENABLE, 1);
+               if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+                       cp->arm_int(dev, status_idx);
                        break;
                }
 
@@ -4845,6 +4891,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
        buf_map = udev->l2_buf_map;
        for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
                struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+               struct eth_tx_parse_bd_e1x *pbd_e1x =
+                       &((txbd + 1)->parse_bd_e1x);
+               struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
                struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
 
                start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
@@ -4854,10 +4903,15 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
                start_bd->nbytes = cpu_to_le16(0x10);
                start_bd->nbd = cpu_to_le16(3);
                start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-               start_bd->general_data = (UNICAST_ADDRESS <<
-                       ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+               start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
                start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
 
+               if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+                       pbd_e2->parsing_data = (UNICAST_ADDRESS <<
+                                ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+               else
+                        pbd_e1x->global_data = (UNICAST_ADDRESS <<
+                               ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
        }
 
        val = (u64) ring_map >> 32;
@@ -5308,7 +5362,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
                /* Need to wait for the ring shutdown event to complete
                 * before clearing the CNIC_UP flag.
                 */
-               while (cp->udev->uio_dev != -1 && i < 15) {
+               while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
                        msleep(100);
                        i++;
                }
@@ -5473,8 +5527,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 
        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
-           !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+       if (CNIC_SUPPORTS_FCOE(cp))
                cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
 
        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
@@ -5492,10 +5545,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cp->stop_cm = cnic_cm_stop_bnx2x_hw;
        cp->enable_int = cnic_enable_bnx2x_int;
        cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
-       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                cp->ack_int = cnic_ack_bnx2x_e2_msix;
-       else
+               cp->arm_int = cnic_arm_bnx2x_e2_msix;
+       } else {
                cp->ack_int = cnic_ack_bnx2x_msix;
+               cp->arm_int = cnic_arm_bnx2x_msix;
+       }
        cp->close_conn = cnic_close_bnx2x_conn;
        return cdev;
 }
index 30328097f516375ee7db7fe9179e178c71cf3375..148604c3fa0c79c51bf0fd1dc75edc3f7598c670 100644 (file)
@@ -334,6 +334,7 @@ struct cnic_local {
        void                    (*enable_int)(struct cnic_dev *);
        void                    (*disable_int_sync)(struct cnic_dev *);
        void                    (*ack_int)(struct cnic_dev *);
+       void                    (*arm_int)(struct cnic_dev *, u32 index);
        void                    (*close_conn)(struct cnic_sock *, u32 opcode);
 };
 
@@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next {
          MAX_STAT_COUNTER_ID_E1))
 #endif
 
+#define CNIC_SUPPORTS_FCOE(cp)                                 \
+       (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) &&                \
+        !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+
 #define CNIC_RAMROD_TMO                        (HZ / 4)
 
 #endif
index 382c98b0cc0c6feb959cd0f4e9dca7a078159c84..ede3db35d757e9c51a5f07cef320ecf323515295 100644 (file)
@@ -896,7 +896,7 @@ struct tstorm_tcp_tcp_ag_context_section {
        u32 snd_nxt;
        u32 rtt_seq;
        u32 rtt_time;
-       u32 __reserved66;
+       u32 wnd_right_edge_local;
        u32 wnd_right_edge;
        u32 tcp_agg_vars1;
 #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
index 5cb88881bba1134776731b7f438d876ed47dc441..865095aad1f6494d985f4dbe15ab1ae198e7e74f 100644 (file)
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION    "2.5.12"
-#define CNIC_MODULE_RELDATE    "June 29, 2012"
+#define CNIC_MODULE_VERSION    "2.5.14"
+#define CNIC_MODULE_RELDATE    "Sep 30, 2012"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
index 388d3221393701cfb2366204f06aa6e191fc2947..46280ba4c5d415930746c0cd7dc7c7e8b5790a95 100644 (file)
 #include <linux/prefetch.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
-#if IS_ENABLED(CONFIG_HWMON)
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-#endif
 
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -92,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    124
+#define TG3_MIN_NUM                    125
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "March 21, 2012"
+#define DRV_MODULE_RELDATE     "September 26, 2012"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -6263,7 +6261,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
 
                tp->rx_refill = false;
-               for (i = 1; i < tp->irq_cnt; i++)
+               for (i = 1; i <= tp->rxq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);
 
@@ -7592,15 +7590,11 @@ static int tg3_init_rings(struct tg3 *tp)
        return 0;
 }
 
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_mem_tx_release(struct tg3 *tp)
 {
        int i;
 
-       for (i = 0; i < tp->irq_cnt; i++) {
+       for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
 
                if (tnapi->tx_ring) {
@@ -7611,17 +7605,114 @@ static void tg3_free_consistent(struct tg3 *tp)
 
                kfree(tnapi->tx_buffers);
                tnapi->tx_buffers = NULL;
+       }
+}
 
-               if (tnapi->rx_rcb) {
-                       dma_free_coherent(&tp->pdev->dev,
-                                         TG3_RX_RCB_RING_BYTES(tp),
-                                         tnapi->rx_rcb,
-                                         tnapi->rx_rcb_mapping);
-                       tnapi->rx_rcb = NULL;
-               }
+static int tg3_mem_tx_acquire(struct tg3 *tp)
+{
+       int i;
+       struct tg3_napi *tnapi = &tp->napi[0];
+
+       /* If multivector TSS is enabled, vector 0 does not handle
+        * tx interrupts.  Don't allocate any resources for it.
+        */
+       if (tg3_flag(tp, ENABLE_TSS))
+               tnapi++;
+
+       for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
+               tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
+                                           TG3_TX_RING_SIZE, GFP_KERNEL);
+               if (!tnapi->tx_buffers)
+                       goto err_out;
+
+               tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+                                                   TG3_TX_RING_BYTES,
+                                                   &tnapi->tx_desc_mapping,
+                                                   GFP_KERNEL);
+               if (!tnapi->tx_ring)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       tg3_mem_tx_release(tp);
+       return -ENOMEM;
+}
+
+static void tg3_mem_rx_release(struct tg3 *tp)
+{
+       int i;
+
+       for (i = 0; i < tp->irq_max; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
 
                tg3_rx_prodring_fini(tp, &tnapi->prodring);
 
+               if (!tnapi->rx_rcb)
+                       continue;
+
+               dma_free_coherent(&tp->pdev->dev,
+                                 TG3_RX_RCB_RING_BYTES(tp),
+                                 tnapi->rx_rcb,
+                                 tnapi->rx_rcb_mapping);
+               tnapi->rx_rcb = NULL;
+       }
+}
+
+static int tg3_mem_rx_acquire(struct tg3 *tp)
+{
+       unsigned int i, limit;
+
+       limit = tp->rxq_cnt;
+
+       /* If RSS is enabled, we need a (dummy) producer ring
+        * set on vector zero.  This is the true hw prodring.
+        */
+       if (tg3_flag(tp, ENABLE_RSS))
+               limit++;
+
+       for (i = 0; i < limit; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
+               if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+                       goto err_out;
+
+               /* If multivector RSS is enabled, vector 0
+                * does not handle rx or tx interrupts.
+                * Don't allocate any resources for it.
+                */
+               if (!i && tg3_flag(tp, ENABLE_RSS))
+                       continue;
+
+               tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+                                                  TG3_RX_RCB_RING_BYTES(tp),
+                                                  &tnapi->rx_rcb_mapping,
+                                                  GFP_KERNEL);
+               if (!tnapi->rx_rcb)
+                       goto err_out;
+
+               memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+       }
+
+       return 0;
+
+err_out:
+       tg3_mem_rx_release(tp);
+       return -ENOMEM;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shutdown down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+       int i;
+
+       for (i = 0; i < tp->irq_cnt; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
                if (tnapi->hw_status) {
                        dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
                                          tnapi->hw_status,
@@ -7630,6 +7721,9 @@ static void tg3_free_consistent(struct tg3 *tp)
                }
        }
 
+       tg3_mem_rx_release(tp);
+       tg3_mem_tx_release(tp);
+
        if (tp->hw_stats) {
                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
@@ -7668,72 +7762,38 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
                sblk = tnapi->hw_status;
 
-               if (tg3_rx_prodring_init(tp, &tnapi->prodring))
-                       goto err_out;
+               if (tg3_flag(tp, ENABLE_RSS)) {
+                       u16 *prodptr = 0;
 
-               /* If multivector TSS is enabled, vector 0 does not handle
-                * tx interrupts.  Don't allocate any resources for it.
-                */
-               if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
-                   (i && tg3_flag(tp, ENABLE_TSS))) {
-                       tnapi->tx_buffers = kzalloc(
-                                              sizeof(struct tg3_tx_ring_info) *
-                                              TG3_TX_RING_SIZE, GFP_KERNEL);
-                       if (!tnapi->tx_buffers)
-                               goto err_out;
-
-                       tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
-                                                           TG3_TX_RING_BYTES,
-                                                       &tnapi->tx_desc_mapping,
-                                                           GFP_KERNEL);
-                       if (!tnapi->tx_ring)
-                               goto err_out;
-               }
-
-               /*
-                * When RSS is enabled, the status block format changes
-                * slightly.  The "rx_jumbo_consumer", "reserved",
-                * and "rx_mini_consumer" members get mapped to the
-                * other three rx return ring producer indexes.
-                */
-               switch (i) {
-               default:
-                       if (tg3_flag(tp, ENABLE_RSS)) {
-                               tnapi->rx_rcb_prod_idx = NULL;
+                       /*
+                        * When RSS is enabled, the status block format changes
+                        * slightly.  The "rx_jumbo_consumer", "reserved",
+                        * and "rx_mini_consumer" members get mapped to the
+                        * other three rx return ring producer indexes.
+                        */
+                       switch (i) {
+                       case 1:
+                               prodptr = &sblk->idx[0].rx_producer;
+                               break;
+                       case 2:
+                               prodptr = &sblk->rx_jumbo_consumer;
+                               break;
+                       case 3:
+                               prodptr = &sblk->reserved;
+                               break;
+                       case 4:
+                               prodptr = &sblk->rx_mini_consumer;
                                break;
                        }
-                       /* Fall through */
-               case 1:
+                       tnapi->rx_rcb_prod_idx = prodptr;
+               } else {
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
-                       break;
-               case 2:
-                       tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
-                       break;
-               case 3:
-                       tnapi->rx_rcb_prod_idx = &sblk->reserved;
-                       break;
-               case 4:
-                       tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
-                       break;
                }
-
-               /*
-                * If multivector RSS is enabled, vector 0 does not handle
-                * rx or tx interrupts.  Don't allocate any resources for it.
-                */
-               if (!i && tg3_flag(tp, ENABLE_RSS))
-                       continue;
-
-               tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
-                                                  TG3_RX_RCB_RING_BYTES(tp),
-                                                  &tnapi->rx_rcb_mapping,
-                                                  GFP_KERNEL);
-               if (!tnapi->rx_rcb)
-                       goto err_out;
-
-               memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        }
 
+       if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
+               goto err_out;
+
        return 0;
 
 err_out:
@@ -8247,9 +8307,10 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
                              nic_addr);
 }
 
-static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+
+static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
 {
-       int i;
+       int i = 0;
 
        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
@@ -8259,31 +8320,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+               for (; i < tp->txq_cnt; i++) {
+                       u32 reg;
+
+                       reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_coalesce_usecs);
+                       reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_max_coalesced_frames);
+                       reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_max_coalesced_frames_irq);
+               }
        }
 
+       for (; i < tp->irq_max - 1; i++) {
+               tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+               tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+               tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+       }
+}
+
+static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+       int i = 0;
+       u32 limit = tp->rxq_cnt;
+
        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+               limit--;
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }
 
-       if (!tg3_flag(tp, 5705_PLUS)) {
-               u32 val = ec->stats_block_coalesce_usecs;
-
-               tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
-               tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
-
-               if (!netif_carrier_ok(tp->dev))
-                       val = 0;
-
-               tw32(HOSTCC_STAT_COAL_TICKS, val);
-       }
-
-       for (i = 0; i < tp->irq_cnt - 1; i++) {
+       for (; i < limit; i++) {
                u32 reg;
 
                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
@@ -8292,27 +8365,30 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);
-
-               if (tg3_flag(tp, ENABLE_TSS)) {
-                       reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
-                       tw32(reg, ec->tx_coalesce_usecs);
-                       reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
-                       tw32(reg, ec->tx_max_coalesced_frames);
-                       reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
-                       tw32(reg, ec->tx_max_coalesced_frames_irq);
-               }
        }
 
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+       }
+}
 
-               if (tg3_flag(tp, ENABLE_TSS)) {
-                       tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
-                       tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
-                       tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
-               }
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+       tg3_coal_tx_init(tp, ec);
+       tg3_coal_rx_init(tp, ec);
+
+       if (!tg3_flag(tp, 5705_PLUS)) {
+               u32 val = ec->stats_block_coalesce_usecs;
+
+               tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+               tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+               if (!netif_carrier_ok(tp->dev))
+                       val = 0;
+
+               tw32(HOSTCC_STAT_COAL_TICKS, val);
        }
 }
 
@@ -8570,13 +8646,12 @@ static void __tg3_set_rx_mode(struct net_device *dev)
        }
 }
 
-static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
 {
        int i;
 
        for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
-               tp->rss_ind_tbl[i] =
-                       ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+               tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
 }
 
 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
@@ -8598,7 +8673,7 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
        }
 
        if (i != TG3_RSS_INDIR_TBL_SIZE)
-               tg3_rss_init_dflt_indir_tbl(tp);
+               tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
 }
 
 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
@@ -9495,7 +9570,6 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
        return tg3_reset_hw(tp, reset_phy);
 }
 
-#if IS_ENABLED(CONFIG_HWMON)
 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
 {
        int i;
@@ -9548,22 +9622,17 @@ static const struct attribute_group tg3_group = {
        .attrs = tg3_attributes,
 };
 
-#endif
-
 static void tg3_hwmon_close(struct tg3 *tp)
 {
-#if IS_ENABLED(CONFIG_HWMON)
        if (tp->hwmon_dev) {
                hwmon_device_unregister(tp->hwmon_dev);
                tp->hwmon_dev = NULL;
                sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
        }
-#endif
 }
 
 static void tg3_hwmon_open(struct tg3 *tp)
 {
-#if IS_ENABLED(CONFIG_HWMON)
        int i, err;
        u32 size = 0;
        struct pci_dev *pdev = tp->pdev;
@@ -9595,7 +9664,6 @@ static void tg3_hwmon_open(struct tg3 *tp)
                dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
                sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
        }
-#endif
 }
 
 
@@ -10119,21 +10187,43 @@ static int tg3_request_firmware(struct tg3 *tp)
        return 0;
 }
 
-static bool tg3_enable_msix(struct tg3 *tp)
+static u32 tg3_irq_count(struct tg3 *tp)
 {
-       int i, rc;
-       struct msix_entry msix_ent[tp->irq_max];
+       u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
 
-       tp->irq_cnt = netif_get_num_default_rss_queues();
-       if (tp->irq_cnt > 1) {
+       if (irq_cnt > 1) {
                /* We want as many rx rings enabled as there are cpus.
                 * In multiqueue MSI-X mode, the first MSI-X vector
                 * only deals with link interrupts, etc, so we add
                 * one to the number of vectors we are requesting.
                 */
-               tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+               irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
        }
 
+       return irq_cnt;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+       int i, rc;
+       struct msix_entry msix_ent[tp->irq_max];
+
+       tp->txq_cnt = tp->txq_req;
+       tp->rxq_cnt = tp->rxq_req;
+       if (!tp->rxq_cnt)
+               tp->rxq_cnt = netif_get_num_default_rss_queues();
+       if (tp->rxq_cnt > tp->rxq_max)
+               tp->rxq_cnt = tp->rxq_max;
+
+       /* Disable multiple TX rings by default.  Simple round-robin hardware
+        * scheduling of the TX rings can cause starvation of rings with
+        * small packets when other rings have TSO or jumbo packets.
+        */
+       if (!tp->txq_req)
+               tp->txq_cnt = 1;
+
+       tp->irq_cnt = tg3_irq_count(tp);
+
        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
@@ -10148,27 +10238,28 @@ static bool tg3_enable_msix(struct tg3 *tp)
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
+               tp->rxq_cnt = max(rc - 1, 1);
+               if (tp->txq_cnt)
+                       tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
        }
 
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;
 
-       netif_set_real_num_tx_queues(tp->dev, 1);
-       rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
-       if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+       if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
                pci_disable_msix(tp->pdev);
                return false;
        }
 
-       if (tp->irq_cnt > 1) {
-               tg3_flag_set(tp, ENABLE_RSS);
+       if (tp->irq_cnt == 1)
+               return true;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
-                       tg3_flag_set(tp, ENABLE_TSS);
-                       netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
-               }
-       }
+       tg3_flag_set(tp, ENABLE_RSS);
+
+       if (tp->txq_cnt > 1)
+               tg3_flag_set(tp, ENABLE_TSS);
+
+       netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
 
        return true;
 }
@@ -10202,6 +10293,11 @@ defcfg:
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
+       }
+
+       if (tp->irq_cnt == 1) {
+               tp->txq_cnt = 1;
+               tp->rxq_cnt = 1;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
@@ -10219,38 +10315,11 @@ static void tg3_ints_fini(struct tg3 *tp)
        tg3_flag_clear(tp, ENABLE_TSS);
 }
 
-static int tg3_open(struct net_device *dev)
+static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
 {
-       struct tg3 *tp = netdev_priv(dev);
+       struct net_device *dev = tp->dev;
        int i, err;
 
-       if (tp->fw_needed) {
-               err = tg3_request_firmware(tp);
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
-                       if (err)
-                               return err;
-               } else if (err) {
-                       netdev_warn(tp->dev, "TSO capability disabled\n");
-                       tg3_flag_clear(tp, TSO_CAPABLE);
-               } else if (!tg3_flag(tp, TSO_CAPABLE)) {
-                       netdev_notice(tp->dev, "TSO capability restored\n");
-                       tg3_flag_set(tp, TSO_CAPABLE);
-               }
-       }
-
-       netif_carrier_off(tp->dev);
-
-       err = tg3_power_up(tp);
-       if (err)
-               return err;
-
-       tg3_full_lock(tp, 0);
-
-       tg3_disable_ints(tp);
-       tg3_flag_clear(tp, INIT_COMPLETE);
-
-       tg3_full_unlock(tp);
-
        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
@@ -10284,7 +10353,7 @@ static int tg3_open(struct net_device *dev)
 
        tg3_full_lock(tp, 0);
 
-       err = tg3_init_hw(tp, 1);
+       err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
@@ -10295,7 +10364,7 @@ static int tg3_open(struct net_device *dev)
        if (err)
                goto err_out3;
 
-       if (tg3_flag(tp, USING_MSI)) {
+       if (test_irq && tg3_flag(tp, USING_MSI)) {
                err = tg3_test_msi(tp);
 
                if (err) {
@@ -10351,20 +10420,18 @@ err_out2:
 
 err_out1:
        tg3_ints_fini(tp);
-       tg3_frob_aux_power(tp, false);
-       pci_set_power_state(tp->pdev, PCI_D3hot);
+
        return err;
 }
 
-static int tg3_close(struct net_device *dev)
+static void tg3_stop(struct tg3 *tp)
 {
        int i;
-       struct tg3 *tp = netdev_priv(dev);
 
        tg3_napi_disable(tp);
        tg3_reset_task_cancel(tp);
 
-       netif_tx_stop_all_queues(dev);
+       netif_tx_disable(tp->dev);
 
        tg3_timer_stop(tp);
 
@@ -10389,13 +10456,60 @@ static int tg3_close(struct net_device *dev)
 
        tg3_ints_fini(tp);
 
-       /* Clear stats across close / open calls */
-       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
-       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
-
        tg3_napi_fini(tp);
 
        tg3_free_consistent(tp);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int err;
+
+       if (tp->fw_needed) {
+               err = tg3_request_firmware(tp);
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+                       if (err)
+                               return err;
+               } else if (err) {
+                       netdev_warn(tp->dev, "TSO capability disabled\n");
+                       tg3_flag_clear(tp, TSO_CAPABLE);
+               } else if (!tg3_flag(tp, TSO_CAPABLE)) {
+                       netdev_notice(tp->dev, "TSO capability restored\n");
+                       tg3_flag_set(tp, TSO_CAPABLE);
+               }
+       }
+
+       netif_carrier_off(tp->dev);
+
+       err = tg3_power_up(tp);
+       if (err)
+               return err;
+
+       tg3_full_lock(tp, 0);
+
+       tg3_disable_ints(tp);
+       tg3_flag_clear(tp, INIT_COMPLETE);
+
+       tg3_full_unlock(tp);
+
+       err = tg3_start(tp, true, true);
+       if (err) {
+               tg3_frob_aux_power(tp, false);
+               pci_set_power_state(tp->pdev, PCI_D3hot);
+       }
+       return err;
+}
+
+static int tg3_close(struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       tg3_stop(tp);
+
+       /* Clear stats across close / open calls */
+       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
        tg3_power_down(tp);
 
@@ -11185,11 +11299,11 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                if (netif_running(tp->dev))
-                       info->data = tp->irq_cnt;
+                       info->data = tp->rxq_cnt;
                else {
                        info->data = num_online_cpus();
-                       if (info->data > TG3_IRQ_MAX_VECS_RSS)
-                               info->data = TG3_IRQ_MAX_VECS_RSS;
+                       if (info->data > TG3_RSS_MAX_NUM_QS)
+                               info->data = TG3_RSS_MAX_NUM_QS;
                }
 
                /* The first interrupt vector only
@@ -11246,6 +11360,58 @@ static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
        return 0;
 }
 
+static void tg3_get_channels(struct net_device *dev,
+                            struct ethtool_channels *channel)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       u32 deflt_qs = netif_get_num_default_rss_queues();
+
+       channel->max_rx = tp->rxq_max;
+       channel->max_tx = tp->txq_max;
+
+       if (netif_running(dev)) {
+               channel->rx_count = tp->rxq_cnt;
+               channel->tx_count = tp->txq_cnt;
+       } else {
+               if (tp->rxq_req)
+                       channel->rx_count = tp->rxq_req;
+               else
+                       channel->rx_count = min(deflt_qs, tp->rxq_max);
+
+               if (tp->txq_req)
+                       channel->tx_count = tp->txq_req;
+               else
+                       channel->tx_count = min(deflt_qs, tp->txq_max);
+       }
+}
+
+static int tg3_set_channels(struct net_device *dev,
+                           struct ethtool_channels *channel)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return -EOPNOTSUPP;
+
+       if (channel->rx_count > tp->rxq_max ||
+           channel->tx_count > tp->txq_max)
+               return -EINVAL;
+
+       tp->rxq_req = channel->rx_count;
+       tp->txq_req = channel->tx_count;
+
+       if (!netif_running(dev))
+               return 0;
+
+       tg3_stop(tp);
+
+       netif_carrier_off(dev);
+
+       tg3_start(tp, true, false);
+
+       return 0;
+}
+
 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
@@ -12494,6 +12660,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh_indir         = tg3_get_rxfh_indir,
        .set_rxfh_indir         = tg3_set_rxfh_indir,
+       .get_channels           = tg3_get_channels,
+       .set_channels           = tg3_set_channels,
        .get_ts_info            = ethtool_op_get_ts_info,
 };
 
@@ -14510,10 +14678,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
-                       tg3_rss_init_dflt_indir_tbl(tp);
                }
        }
 
+       tp->txq_max = 1;
+       tp->rxq_max = 1;
+       if (tp->irq_max > 1) {
+               tp->rxq_max = TG3_RSS_MAX_NUM_QS;
+               tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
+
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+                       tp->txq_max = tp->irq_max - 1;
+       }
+
        if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
index 6d52cb28682674b6bab84f39450b8149eb0ca569..d9308c32102e998fb22c45f9ee5c21482286072a 100644 (file)
@@ -2860,7 +2860,8 @@ struct tg3_rx_prodring_set {
        dma_addr_t                      rx_jmb_mapping;
 };
 
-#define TG3_IRQ_MAX_VECS_RSS           5
+#define TG3_RSS_MAX_NUM_QS             4
+#define TG3_IRQ_MAX_VECS_RSS           (TG3_RSS_MAX_NUM_QS + 1)
 #define TG3_IRQ_MAX_VECS               TG3_IRQ_MAX_VECS_RSS
 
 struct tg3_napi {
@@ -3037,6 +3038,9 @@ struct tg3 {
        void                            (*write32_tx_mbox) (struct tg3 *, u32,
                                                            u32);
        u32                             dma_limit;
+       u32                             txq_req;
+       u32                             txq_cnt;
+       u32                             txq_max;
 
        /* begin "rx thread" cacheline section */
        struct tg3_napi                 napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3055,9 @@ struct tg3 {
        u32                             rx_std_max_post;
        u32                             rx_offset;
        u32                             rx_pkt_map_sz;
+       u32                             rxq_req;
+       u32                             rxq_cnt;
+       u32                             rxq_max;
        bool                            rx_refill;
 
 
index b441f33258e7c48c2cd1ce5afd5a2c021cbde98a..ce1eac529470de89cdbc2107a649affbfedb3761 100644 (file)
@@ -3268,6 +3268,7 @@ bnad_pci_probe(struct pci_dev *pdev,
         *      Output : using_dac = 1 for 64 bit DMA
         *                         = 0 for 32 bit DMA
         */
+       using_dac = false;
        err = bnad_pci_init(bnad, pdev, &using_dac);
        if (err)
                goto unlock_mutex;
index ec2dafe8ae5bbb15b0bfaad2be9c5cd4bd2bf2a7..745a1f53361f379b9c075d190f7919c0f0866e4c 100644 (file)
@@ -67,12 +67,12 @@ enum {
 };
 
 enum {
-       MEMWIN0_APERTURE = 65536,
-       MEMWIN0_BASE     = 0x30000,
+       MEMWIN0_APERTURE = 2048,
+       MEMWIN0_BASE     = 0x1b800,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
-       MEMWIN2_APERTURE = 2048,
-       MEMWIN2_BASE     = 0x1b800,
+       MEMWIN2_APERTURE = 65536,
+       MEMWIN2_BASE     = 0x30000,
 };
 
 enum dev_master {
@@ -211,6 +211,9 @@ struct tp_err_stats {
 struct tp_params {
        unsigned int ntxchan;        /* # of Tx channels */
        unsigned int tre;            /* log2 of core clocks per TP tick */
+
+       uint32_t dack_re;            /* DACK timer resolution */
+       unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */
 };
 
 struct vpd_params {
@@ -315,6 +318,10 @@ enum {                                 /* adapter flags */
        USING_MSI          = (1 << 1),
        USING_MSIX         = (1 << 2),
        FW_OK              = (1 << 4),
+       RSS_TNLALLLOOKUP   = (1 << 5),
+       USING_SOFT_PARAMS  = (1 << 6),
+       MASTER_PF          = (1 << 7),
+       FW_OFLD_CONN       = (1 << 9),
 };
 
 struct rx_sw_desc;
@@ -467,6 +474,11 @@ struct sge {
        u16 rdma_rxq[NCHAN];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
+       u32 fl_pg_order;            /* large page allocation size */
+       u32 stat_len;               /* length of status page at ring end */
+       u32 pktshift;               /* padding between CPL & packet data */
+       u32 fl_align;               /* response queue message alignment */
+       u32 fl_starve_thres;        /* Free List starvation threshold */
        unsigned int starve_thres;
        u8 idma_state[2];
        unsigned int egr_start;
@@ -511,6 +523,8 @@ struct adapter {
        struct net_device *port[MAX_NPORTS];
        u8 chan_map[NCHAN];                   /* channel -> port map */
 
+       unsigned int l2t_start;
+       unsigned int l2t_end;
        struct l2t_data *l2t;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;
@@ -619,7 +633,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid);
 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
-void t4_sge_init(struct adapter *adap);
+int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
 extern int dbfifo_int_thresh;
@@ -638,6 +652,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
        return (us * adap->params.vpd.cclk) / 1000;
 }
 
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+                                           unsigned int ticks)
+{
+       /* add Core Clock / 2 to round ticks to nearest uS */
+       return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+               adapter->params.vpd.cclk);
+}
+
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);
 
@@ -656,6 +678,9 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
 }
 
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+                      unsigned int data_reg, const u32 *vals,
+                      unsigned int nregs, unsigned int start_idx);
 void t4_intr_enable(struct adapter *adapter);
 void t4_intr_disable(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);
@@ -664,8 +689,12 @@ int t4_wait_dev_ready(struct adapter *adap);
 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
+                   __be32 *buf);
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+unsigned int t4_flash_cfg_addr(struct adapter *adapter);
 int t4_check_fw_version(struct adapter *adapter);
 int t4_prep_adapter(struct adapter *adapter);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -680,6 +709,8 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
 
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+                           unsigned int mask, unsigned int val);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -695,6 +726,16 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
 int t4_early_init(struct adapter *adap, unsigned int mbox);
 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+                 const u8 *fw_data, unsigned int size, int force);
+int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
+                     unsigned int mtype, unsigned int maddr,
+                     u32 *finiver, u32 *finicsum, u32 *cfcsum);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+                         unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int nparams, const u32 *params,
                    u32 *val);
index 933985420acbbfc7d0503bba1913125961963ab1..6b9f6bb2f7edb9e505c69f8ce40507b2093df131 100644 (file)
  */
 #define MAX_SGE_TIMERVAL 200U
 
-#ifdef CONFIG_PCI_IOV
-/*
- * Virtual Function provisioning constants.  We need two extra Ingress Queues
- * with Interrupt capability to serve as the VF's Firmware Event Queue and
- * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
- * Lists associated with them).  For each Ethernet/Control Egress Queue and
- * for each Free List, we need an Egress Context.
- */
 enum {
+       /*
+        * Physical Function provisioning constants.
+        */
+       PFRES_NVI = 4,                  /* # of Virtual Interfaces */
+       PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
+       PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
+                                        */
+       PFRES_NEQ = 256,                /* # of egress queues */
+       PFRES_NIQ = 0,                  /* # of ingress queues */
+       PFRES_TC = 0,                   /* PCI-E traffic class */
+       PFRES_NEXACTF = 128,            /* # of exact MPS filters */
+
+       PFRES_R_CAPS = FW_CMD_CAP_PF,
+       PFRES_WX_CAPS = FW_CMD_CAP_PF,
+
+#ifdef CONFIG_PCI_IOV
+       /*
+        * Virtual Function provisioning constants.  We need two extra Ingress
+        * Queues with Interrupt capability to serve as the VF's Firmware
+        * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
+        * neither will have Free Lists associated with them).  For each
+        * Ethernet/Control Egress Queue and for each Free List, we need an
+        * Egress Context.
+        */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
 
        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
-       VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
+       VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */
 
        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+#endif
 };
 
 /*
@@ -146,7 +163,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
        }
        /*NOTREACHED*/
 }
-#endif
 
 enum {
        MAX_TXQ_ENTRIES      = 16384,
@@ -193,6 +209,7 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 };
 
 #define FW_FNAME "cxgb4/t4fw.bin"
+#define FW_CFNAME "cxgb4/t4-config.txt"
 
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Chelsio Communications");
@@ -201,6 +218,28 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 MODULE_FIRMWARE(FW_FNAME);
 
+/*
+ * Normally we're willing to become the firmware's Master PF but will be happy
+ * if another PF has already become the Master and initialized the adapter.
+ * Setting "force_init" will cause this driver to forcibly establish itself as
+ * the Master PF and initialize the adapter.
+ */
+static uint force_init;
+
+module_param(force_init, uint, 0644);
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
+
+/*
+ * Normally if the firmware we connect to has Configuration File support, we
+ * use that and only fall back to the old Driver-based initialization if the
+ * Configuration File fails for some reason.  If force_old_init is set, then
+ * we'll always use the old Driver-based initialization sequence.
+ */
+static uint force_old_init;
+
+module_param(force_old_init, uint, 0644);
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
 module_param(dflt_msg_enable, int, 0644);
@@ -236,6 +275,20 @@ module_param_array(intr_cnt, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
 
+/*
+ * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
+ * offset by 2 bytes in order to have the IP headers line up on 4-byte
+ * boundaries.  This is a requirement for many architectures which will throw
+ * a machine check fault if an attempt is made to access one of the 4-byte IP
+ * header fields on a non-4-byte boundary.  And it's a major performance issue
+ * even on some architectures which allow it like some implementations of the
+ * x86 ISA.  However, some architectures don't mind this and for some very
+ * edge-case performance sensitive applications (like forwarding large volumes
+ * of small packets), setting this DMA offset to 0 will decrease the number of
+ * PCI-E Bus transfers enough to measurably affect performance.
+ */
+static int rx_dma_offset = 2;
+
 static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
@@ -248,6 +301,30 @@ module_param_array(num_vf, uint, NULL, 0644);
 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
 #endif
 
+/*
+ * The filter TCAM has a fixed portion and a variable portion.  The fixed
+ * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
+ * ports.  The variable portion is 36 bits which can include things like Exact
+ * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
+ * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
+ * far exceed the 36-bit budget for this "compressed" header portion of the
+ * filter.  Thus, we have a scarce resource which must be carefully managed.
+ *
+ * By default we set this up to mostly match the set of filter matching
+ * capabilities of T3 but with accommodations for some of T4's more
+ * interesting features:
+ *
+ *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
+ *     [Inner] VLAN (17), Port (3), FCoE (1) }
+ */
+enum {
+       TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
+       TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
+       TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
+};
+
+static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+
 static struct dentry *cxgb4_debugfs_root;
 
 static LIST_HEAD(adapter_list);
@@ -852,11 +929,25 @@ static int upgrade_fw(struct adapter *adap)
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
-               ret = -t4_load_fw(adap, fw->data, fw->size);
+               dev_info(dev, "upgrading firmware ...\n");
+               ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
+                                   /*force=*/false);
                if (!ret)
-                       dev_info(dev, "firmware upgraded to version %pI4 from "
-                                FW_FNAME "\n", &hdr->fw_ver);
+                       dev_info(dev, "firmware successfully upgraded to "
+                                FW_FNAME " (%d.%d.%d.%d)\n",
+                                FW_HDR_FW_VER_MAJOR_GET(vers),
+                                FW_HDR_FW_VER_MINOR_GET(vers),
+                                FW_HDR_FW_VER_MICRO_GET(vers),
+                                FW_HDR_FW_VER_BUILD_GET(vers));
+               else
+                       dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
+       } else {
+               /*
+                * Tell our caller that we didn't upgrade the firmware.
+                */
+               ret = -EINVAL;
        }
+
 out:   release_firmware(fw);
        return ret;
 }
@@ -2470,8 +2561,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
                else
                        delta = size - hw_pidx + pidx;
                wmb();
-               t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
-                            V_QID(qid) | V_PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(qid) | PIDX(delta));
        }
 out:
        return ret;
@@ -2579,8 +2670,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
                else
                        delta = q->size - hw_pidx + q->db_pidx;
                wmb();
-               t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
-                               V_QID(q->cntxt_id) | V_PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(q->cntxt_id) | PIDX(delta));
        }
 out:
        q->db_disabled = 0;
@@ -2617,9 +2708,9 @@ static void process_db_full(struct work_struct *work)
 
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        drain_db_fifo(adap, dbfifo_drain_delay);
-       t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
-                       F_DBFIFO_HP_INT | F_DBFIFO_LP_INT,
-                       F_DBFIFO_HP_INT | F_DBFIFO_LP_INT);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
 }
 
@@ -2639,8 +2730,8 @@ static void process_db_drop(struct work_struct *work)
 
 void t4_db_full(struct adapter *adap)
 {
-       t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
-                       F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
        queue_work(workq, &adap->db_full_task);
 }
 
@@ -3076,6 +3167,10 @@ static void setup_memwin(struct adapter *adap)
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
                     (bar0 + MEMWIN2_BASE) | BIR(0) |
                     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+}
+
+static void setup_memwin_rdma(struct adapter *adap)
+{
        if (adap->vres.ocq.size) {
                unsigned int start, sz_kb;
 
@@ -3153,6 +3248,488 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
  */
 #define MAX_ATIDS 8192U
 
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration
+ */
+
+/*
+ * Tweak configuration based on module parameters, etc.  Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization.  But even in the case of using Firmware Configuration
+ * Files, we'd like to expose the ability to change these via module
+ * parameters so these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+       /*
+        * Fix up various Host-Dependent Parameters like Page Size, Cache
+        * Line Size, etc.  The firmware default is for a 4KB Page Size and
+        * 64B Cache Line Size ...
+        */
+       t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
+
+       /*
+        * Process module parameters which affect early initialization.
+        */
+       if (rx_dma_offset != 2 && rx_dma_offset != 0) {
+               dev_err(&adapter->pdev->dev,
+                       "Ignoring illegal rx_dma_offset=%d, using 2\n",
+                       rx_dma_offset);
+               rx_dma_offset = 2;
+       }
+       t4_set_reg_field(adapter, SGE_CONTROL,
+                        PKTSHIFT_MASK,
+                        PKTSHIFT(rx_dma_offset));
+
+       /*
+        * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+        * adds the pseudo header itself.
+        */
+       t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
+                              CSUM_HAS_PSEUDO_HDR, 0);
+
+       return 0;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+       struct fw_caps_config_cmd caps_cmd;
+       const struct firmware *cf;
+       unsigned long mtype = 0, maddr = 0;
+       u32 finiver, finicsum, cfcsum;
+       int ret, using_flash;
+
+       /*
+        * Reset device if necessary.
+        */
+       if (reset) {
+               ret = t4_fw_reset(adapter, adapter->mbox,
+                                 PIORSTMODE | PIORST);
+               if (ret < 0)
+                       goto bye;
+       }
+
+       /*
+        * If we have a T4 configuration file under /lib/firmware/cxgb4/,
+        * then use that.  Otherwise, use the configuration file stored
+        * in the adapter flash ...
+        */
+       ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+       if (ret < 0) {
+               using_flash = 1;
+               mtype = FW_MEMTYPE_CF_FLASH;
+               maddr = t4_flash_cfg_addr(adapter);
+       } else {
+               u32 params[7], val[7];
+
+               using_flash = 0;
+               if (cf->size >= FLASH_CFG_MAX_SIZE)
+                       ret = -ENOMEM;
+               else {
+                       params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+                       ret = t4_query_params(adapter, adapter->mbox,
+                                             adapter->fn, 0, 1, params, val);
+                       if (ret == 0) {
+                               /*
+                                * For t4_memory_write() below addresses and
+                                * sizes have to be in terms of multiples of 4
+                                * bytes.  So, if the Configuration File isn't
+                                * a multiple of 4 bytes in length we'll have
+                                * to write that out separately since we can't
+                                * guarantee that the bytes following the
+                                * residual byte in the buffer returned by
+                                * request_firmware() are zeroed out ...
+                                */
+                               size_t resid = cf->size & 0x3;
+                               size_t size = cf->size & ~0x3;
+                               __be32 *data = (__be32 *)cf->data;
+
+                               mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
+                               maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
+
+                               ret = t4_memory_write(adapter, mtype, maddr,
+                                                     size, data);
+                               if (ret == 0 && resid != 0) {
+                                       union {
+                                               __be32 word;
+                                               char buf[4];
+                                       } last;
+                                       int i;
+
+                                       last.word = data[size >> 2];
+                                       for (i = resid; i < 4; i++)
+                                               last.buf[i] = 0;
+                                       ret = t4_memory_write(adapter, mtype,
+                                                             maddr + size,
+                                                             4, &last.word);
+                               }
+                       }
+               }
+
+               release_firmware(cf);
+               if (ret)
+                       goto bye;
+       }
+
+       /*
+        * Issue a Capability Configuration command to the firmware to get it
+        * to parse the Configuration File.  We don't use t4_fw_config_file()
+        * because we want the ability to modify various features after we've
+        * processed the configuration file ...
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_READ);
+       caps_cmd.retval_len16 =
+               htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+                     FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+                     FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+                     FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+       finiver = ntohl(caps_cmd.finiver);
+       finicsum = ntohl(caps_cmd.finicsum);
+       cfcsum = ntohl(caps_cmd.cfcsum);
+       if (finicsum != cfcsum)
+               dev_warn(adapter->pdev_dev, "Configuration File checksum "\
+                        "mismatch: [fini] csum=%#x, computed csum=%#x\n",
+                        finicsum, cfcsum);
+
+       /*
+        * If we're a pure NIC driver then disable all offloading facilities.
+        * This will allow the firmware to optimize aspects of the hardware
+        * configuration which will result in improved performance.
+        */
+       caps_cmd.ofldcaps = 0;
+       caps_cmd.iscsicaps = 0;
+       caps_cmd.rdmacaps = 0;
+       caps_cmd.fcoecaps = 0;
+
+       /*
+        * And now tell the firmware to use the configuration we just loaded.
+        */
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_WRITE);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        NULL);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Tweak configuration based on system architecture, module
+        * parameters, etc.
+        */
+       ret = adap_init0_tweaks(adapter);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * And finally tell the firmware to initialize itself using the
+        * parameters from the Configuration File.
+        */
+       ret = t4_fw_initialize(adapter, adapter->mbox);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Return successfully and note that we're operating with parameters
+        * not supplied by the driver, rather than from hard-wired
+        * initialization constants buried in the driver.
+        */
+       adapter->flags |= USING_SOFT_PARAMS;
+       dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
+                "Configuration File %s, version %#x, computed checksum %#x\n",
+                (using_flash
+                 ? "in device FLASH"
+                 : "/lib/firmware/" FW_CFNAME),
+                finiver, cfcsum);
+       return 0;
+
+       /*
+        * Something bad happened.  Return the error ...  (If the "error"
+        * is that there's no Configuration File on the adapter we don't
+        * want to issue a warning since this is fairly common.)
+        */
+bye:
+       if (ret != -ENOENT)
+               dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
+                        -ret);
+       return ret;
+}
+
+/*
+ * Attempt to initialize the adapter via hard-coded, driver supplied
+ * parameters ...
+ */
+static int adap_init0_no_config(struct adapter *adapter, int reset)
+{
+       struct sge *s = &adapter->sge;
+       struct fw_caps_config_cmd caps_cmd;
+       u32 v;
+       int i, ret;
+
+       /*
+        * Reset device if necessary
+        */
+       if (reset) {
+               ret = t4_fw_reset(adapter, adapter->mbox,
+                                 PIORSTMODE | PIORST);
+               if (ret < 0)
+                       goto bye;
+       }
+
+       /*
+        * Get device capabilities and select which we'll be using.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                                    FW_CMD_REQUEST | FW_CMD_READ);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+#ifndef CONFIG_CHELSIO_T4_OFFLOAD
+       /*
+        * If we're a pure NIC driver then disable all offloading facilities.
+        * This will allow the firmware to optimize aspects of the hardware
+        * configuration which will result in improved performance.
+        */
+       caps_cmd.ofldcaps = 0;
+       caps_cmd.iscsicaps = 0;
+       caps_cmd.rdmacaps = 0;
+       caps_cmd.fcoecaps = 0;
+#endif
+
+       if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+               if (!vf_acls)
+                       caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+               else
+                       caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+       } else if (vf_acls) {
+               dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
+               goto bye;
+       }
+       caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                             FW_CMD_REQUEST | FW_CMD_WRITE);
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        NULL);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Tweak configuration based on system architecture, module
+        * parameters, etc.
+        */
+       ret = adap_init0_tweaks(adapter);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Select RSS Global Mode we want to use.  We use "Basic Virtual"
+        * mode which maps each Virtual Interface to its own section of
+        * the RSS Table and we turn on all map and hash enables ...
+        */
+       adapter->flags |= RSS_TNLALLLOOKUP;
+       ret = t4_config_glbl_rss(adapter, adapter->mbox,
+                                FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+                                FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+                                FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+                                ((adapter->flags & RSS_TNLALLLOOKUP) ?
+                                       FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Set up our own fundamental resource provisioning ...
+        */
+       ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
+                         PFRES_NEQ, PFRES_NETHCTRL,
+                         PFRES_NIQFLINT, PFRES_NIQ,
+                         PFRES_TC, PFRES_NVI,
+                         FW_PFVF_CMD_CMASK_MASK,
+                         pfvfres_pmask(adapter, adapter->fn, 0),
+                         PFRES_NEXACTF,
+                         PFRES_R_CAPS, PFRES_WX_CAPS);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Perform low level SGE initialization.  We need to do this before we
+        * send the firmware the INITIALIZE command because that will cause
+        * any other PF Drivers which are waiting for the Master
+        * Initialization to proceed forward.
+        */
+       for (i = 0; i < SGE_NTIMERS - 1; i++)
+               s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
+       s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+       s->counter_val[0] = 1;
+       for (i = 1; i < SGE_NCOUNTERS; i++)
+               s->counter_val[i] = min(intr_cnt[i - 1],
+                                       THRESHOLD_0_GET(THRESHOLD_0_MASK));
+       t4_sge_init(adapter);
+
+#ifdef CONFIG_PCI_IOV
+       /*
+        * Provision resource limits for Virtual Functions.  We currently
+        * grant them all the same static resource limits except for the Port
+        * Access Rights Mask which we're assigning based on the PF.  All of
+        * the static provisioning stuff for both the PF and VF really needs
+        * to be managed in a persistent manner for each device which the
+        * firmware controls.
+        */
+       {
+               int pf, vf;
+
+               for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+                       if (num_vf[pf] <= 0)
+                               continue;
+
+                       /* VF numbering starts at 1! */
+                       for (vf = 1; vf <= num_vf[pf]; vf++) {
+                               ret = t4_cfg_pfvf(adapter, adapter->mbox,
+                                                 pf, vf,
+                                                 VFRES_NEQ, VFRES_NETHCTRL,
+                                                 VFRES_NIQFLINT, VFRES_NIQ,
+                                                 VFRES_TC, VFRES_NVI,
+                                                 FW_PFVF_CMD_CMASK_GET(
+                                                 FW_PFVF_CMD_CMASK_MASK),
+                                                 pfvfres_pmask(
+                                                 adapter, pf, vf),
+                                                 VFRES_NEXACTF,
+                                                 VFRES_R_CAPS, VFRES_WX_CAPS);
+                               if (ret < 0)
+                                       dev_warn(adapter->pdev_dev,
+                                                "failed to "\
+                                                "provision pf/vf=%d/%d; "
+                                                "err=%d\n", pf, vf, ret);
+                       }
+               }
+       }
+#endif
+
+       /*
+        * Set up the default filter mode.  Later we'll want to implement this
+        * via a firmware command, etc. ...  This needs to be done before the
+        * firmare initialization command ...  If the selected set of fields
+        * isn't equal to the default value, we'll need to make sure that the
+        * field selections will fit in the 36-bit budget.
+        */
+       if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
+               int i, bits = 0;
+
+               for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
+                       switch (tp_vlan_pri_map & (1 << i)) {
+                       case 0:
+                               /* compressed filter field not enabled */
+                               break;
+                       case FCOE_MASK:
+                               bits +=  1;
+                               break;
+                       case PORT_MASK:
+                               bits +=  3;
+                               break;
+                       case VNIC_ID_MASK:
+                               bits += 17;
+                               break;
+                       case VLAN_MASK:
+                               bits += 17;
+                               break;
+                       case TOS_MASK:
+                               bits +=  8;
+                               break;
+                       case PROTOCOL_MASK:
+                               bits +=  8;
+                               break;
+                       case ETHERTYPE_MASK:
+                               bits += 16;
+                               break;
+                       case MACMATCH_MASK:
+                               bits +=  9;
+                               break;
+                       case MPSHITTYPE_MASK:
+                               bits +=  3;
+                               break;
+                       case FRAGMENTATION_MASK:
+                               bits +=  1;
+                               break;
+                       }
+
+               if (bits > 36) {
+                       dev_err(adapter->pdev_dev,
+                               "tp_vlan_pri_map=%#x needs %d bits > 36;"\
+                               " using %#x\n", tp_vlan_pri_map, bits,
+                               TP_VLAN_PRI_MAP_DEFAULT);
+                       tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+               }
+       }
+       v = tp_vlan_pri_map;
+       t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
+                         &v, 1, TP_VLAN_PRI_MAP);
+
+       /*
+        * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
+        * order to support any of the compressed filter fields above.  Newer
+        * versions of the firmware do this automatically but it doesn't hurt
+        * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
+        * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
+        * since the firmware automatically turns this on and off when we have
+        * a non-zero number of filters active (since it does have a
+        * performance impact).
+        */
+       if (tp_vlan_pri_map)
+               t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
+                                FIVETUPLELOOKUP_MASK,
+                                FIVETUPLELOOKUP_MASK);
+
+       /*
+        * Tweak some settings.
+        */
+       t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
+                    RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
+                    PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
+                    KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
+
+       /*
+        * Get basic stuff going by issuing the Firmware Initialize command.
+        * Note that this _must_ be after all PFVF commands ...
+        */
+       ret = t4_fw_initialize(adapter, adapter->mbox);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Return successfully!
+        */
+       dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
+                "driver parameters\n");
+       return 0;
+
+       /*
+        * Something bad happened.  Return the error ...
+        */
+bye:
+       return ret;
+}
+
 /*
  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
  */
@@ -3162,72 +3739,216 @@ static int adap_init0(struct adapter *adap)
        u32 v, port_vec;
        enum dev_state state;
        u32 params[7], val[7];
-       struct fw_caps_config_cmd c;
-
-       ret = t4_check_fw_version(adap);
-       if (ret == -EINVAL || ret > 0) {
-               if (upgrade_fw(adap) >= 0)             /* recache FW version */
-                       ret = t4_check_fw_version(adap);
-       }
-       if (ret < 0)
-               return ret;
+       int reset = 1, j;
 
-       /* contact FW, request master */
-       ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
+       /*
+        * Contact FW, advertising Master capability (and potentially forcing
+        * ourselves as the Master PF if our module parameter force_init is
+        * set).
+        */
+       ret = t4_fw_hello(adap, adap->mbox, adap->fn,
+                         force_init ? MASTER_MUST : MASTER_MAY,
+                         &state);
        if (ret < 0) {
                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
                        ret);
                return ret;
        }
+       if (ret == adap->mbox)
+               adap->flags |= MASTER_PF;
+       if (force_init && state == DEV_STATE_INIT)
+               state = DEV_STATE_UNINIT;
 
-       /* reset device */
-       ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
-       if (ret < 0)
-               goto bye;
-
-       for (v = 0; v < SGE_NTIMERS - 1; v++)
-               adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
-       adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
-       adap->sge.counter_val[0] = 1;
-       for (v = 1; v < SGE_NCOUNTERS; v++)
-               adap->sge.counter_val[v] = min(intr_cnt[v - 1],
-                                              THRESHOLD_3_MASK);
-#define FW_PARAM_DEV(param) \
-       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
-        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+       /*
+        * If we're the Master PF Driver and the device is uninitialized,
+        * then let's consider upgrading the firmware ...  (We always want
+        * to check the firmware version number in order to A. get it for
+        * later reporting and B. to warn if the currently loaded firmware
+        * is excessively mismatched relative to the driver.)
+        */
+       ret = t4_check_fw_version(adap);
+       if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+               if (ret == -EINVAL || ret > 0) {
+                       if (upgrade_fw(adap) >= 0) {
+                               /*
+                                * Note that the chip was reset as part of the
+                                * firmware upgrade so we don't reset it again
+                                * below and grab the new firmware version.
+                                */
+                               reset = 0;
+                               ret = t4_check_fw_version(adap);
+                       }
+               }
+               if (ret < 0)
+                       return ret;
+       }
 
-       params[0] = FW_PARAM_DEV(CCLK);
-       ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
+       /*
+        * Grab VPD parameters.  This should be done after we establish a
+        * connection to the firmware since some of the VPD parameters
+        * (notably the Core Clock frequency) are retrieved via requests to
+        * the firmware.  On the other hand, we need these fairly early on
+        * so we do this right after getting ahold of the firmware.
+        */
+       ret = get_vpd_params(adap, &adap->params.vpd);
        if (ret < 0)
                goto bye;
-       adap->params.vpd.cclk = val[0];
 
-       ret = adap_init1(adap, &c);
+       /*
+        * Find out what ports are available to us.  Note that we need to do
+        * this before calling adap_init0_no_config() since it needs nports
+        * and portvec ...
+        */
+       v =
+           FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+           FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
        if (ret < 0)
                goto bye;
 
+       adap->params.nports = hweight32(port_vec);
+       adap->params.portvec = port_vec;
+
+       /*
+        * If the firmware is initialized already (and we're not forcing a
+        * master initialization), note that we're living with existing
+        * adapter parameters.  Otherwise, it's time to try initializing the
+        * adapter ...
+        */
+       if (state == DEV_STATE_INIT) {
+               dev_info(adap->pdev_dev, "Coming up as %s: "\
+                        "Adapter already initialized\n",
+                        adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+               adap->flags |= USING_SOFT_PARAMS;
+       } else {
+               dev_info(adap->pdev_dev, "Coming up as MASTER: "\
+                        "Initializing adapter\n");
+
+               /*
+                * If the firmware doesn't support Configuration
+                * Files, warn the user and exit.
+                */
+               if (ret < 0)
+                       dev_warn(adap->pdev_dev, "Firmware doesn't support "
+                                "configuration file.\n");
+               if (force_old_init)
+                       ret = adap_init0_no_config(adap, reset);
+               else {
+                       /*
+                        * Find out whether we're dealing with a version of
+                        * the firmware which has configuration file support.
+                        */
+                       params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+                                    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+                       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+                                             params, val);
+
+                       /*
+                        * If the firmware doesn't support Configuration
+                        * Files, use the old Driver-based, hard-wired
+                        * initialization.  Otherwise, try using the
+                        * Configuration File support and fall back to the
+                        * Driver-based initialization if there's no
+                        * Configuration File found.
+                        */
+                       if (ret < 0)
+                               ret = adap_init0_no_config(adap, reset);
+                       else {
+                               /*
+                                * The firmware provides us with a memory
+                                * buffer where we can load a Configuration
+                                * File from the host if we want to override
+                                * the Configuration File in flash.
+                                */
+
+                               ret = adap_init0_config(adap, reset);
+                               if (ret == -ENOENT) {
+                                       dev_info(adap->pdev_dev,
+                                           "No Configuration File present "
+                                           "on adapter.  Using hard-wired "
+                                           "configuration parameters.\n");
+                                       ret = adap_init0_no_config(adap, reset);
+                               }
+                       }
+               }
+               if (ret < 0) {
+                       dev_err(adap->pdev_dev,
+                               "could not initialize adapter, error %d\n",
+                               -ret);
+                       goto bye;
+               }
+       }
+
+       /*
+        * If we're living with non-hard-coded parameters (either from a
+        * Firmware Configuration File or values programmed by a different PF
+        * Driver), give the SGE code a chance to pull in anything that it
+        * needs ...  Note that this must be called after we retrieve our VPD
+        * parameters in order to know how to convert core ticks to seconds.
+        */
+       if (adap->flags & USING_SOFT_PARAMS) {
+               ret = t4_sge_init(adap);
+               if (ret < 0)
+                       goto bye;
+       }
+
+       /*
+        * Grab some of our basic fundamental operating parameters.
+        */
+#define FW_PARAM_DEV(param) \
+       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+       FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
 #define FW_PARAM_PFVF(param) \
-       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
-        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
-        FW_PARAMS_PARAM_Y(adap->fn))
+       FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+       FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
+       FW_PARAMS_PARAM_Y(0) | \
+       FW_PARAMS_PARAM_Z(0)
 
-       params[0] = FW_PARAM_DEV(PORTVEC);
+       params[0] = FW_PARAM_PFVF(EQ_START);
        params[1] = FW_PARAM_PFVF(L2T_START);
        params[2] = FW_PARAM_PFVF(L2T_END);
        params[3] = FW_PARAM_PFVF(FILTER_START);
        params[4] = FW_PARAM_PFVF(FILTER_END);
        params[5] = FW_PARAM_PFVF(IQFLINT_START);
-       params[6] = FW_PARAM_PFVF(EQ_START);
-       ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
        if (ret < 0)
                goto bye;
-       port_vec = val[0];
+       adap->sge.egr_start = val[0];
+       adap->l2t_start = val[1];
+       adap->l2t_end = val[2];
        adap->tids.ftid_base = val[3];
        adap->tids.nftids = val[4] - val[3] + 1;
        adap->sge.ingr_start = val[5];
-       adap->sge.egr_start = val[6];
 
-       if (c.ofldcaps) {
+       /* query params related to active filter region */
+       params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
+       params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       /* If Active filter size is set we enable establishing
+        * offload connection through firmware work request
+        */
+       if ((val[0] != val[1]) && (ret >= 0)) {
+               adap->flags |= FW_OFLD_CONN;
+               adap->tids.aftid_base = val[0];
+               adap->tids.aftid_end = val[1];
+       }
+
+#ifdef CONFIG_CHELSIO_T4_OFFLOAD
+       /*
+        * Get device capabilities so we can determine what resources we need
+        * to manage.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                                    FW_CMD_REQUEST | FW_CMD_READ);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+       if (caps_cmd.ofldcaps) {
                /* query offload-related parameters */
                params[0] = FW_PARAM_DEV(NTID);
                params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -3235,28 +3956,55 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(TDDP_START);
                params[4] = FW_PARAM_PFVF(TDDP_END);
                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
-                                     val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+                                     params, val);
                if (ret < 0)
                        goto bye;
                adap->tids.ntids = val[0];
                adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
                adap->tids.stid_base = val[1];
                adap->tids.nstids = val[2] - val[1] + 1;
+               /*
+                * Set up server filter region. Divide the available filter
+                * region into two parts. Regular filters get 1/3rd and server
+                * filters get 2/3rd part. This is only enabled if workaround
+                * path is enabled.
+                * 1. For regular filters.
+                * 2. Server filter: These are special filters which are used
+                * to redirect SYN packets to offload queue.
+                */
+               if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+                       adap->tids.sftid_base = adap->tids.ftid_base +
+                                       DIV_ROUND_UP(adap->tids.nftids, 3);
+                       adap->tids.nsftids = adap->tids.nftids -
+                                        DIV_ROUND_UP(adap->tids.nftids, 3);
+                       adap->tids.nftids = adap->tids.sftid_base -
+                                               adap->tids.ftid_base;
+               }
                adap->vres.ddp.start = val[3];
                adap->vres.ddp.size = val[4] - val[3] + 1;
                adap->params.ofldq_wr_cred = val[5];
+
+               params[0] = FW_PARAM_PFVF(ETHOFLD_START);
+               params[1] = FW_PARAM_PFVF(ETHOFLD_END);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+                                     params, val);
+               if ((val[0] != val[1]) && (ret >= 0)) {
+                       adap->tids.uotid_base = val[0];
+                       adap->tids.nuotids = val[1] - val[0] + 1;
+               }
+
                adap->params.offload = 1;
        }
-       if (c.rdmacaps) {
+       if (caps_cmd.rdmacaps) {
                params[0] = FW_PARAM_PFVF(STAG_START);
                params[1] = FW_PARAM_PFVF(STAG_END);
                params[2] = FW_PARAM_PFVF(RQ_START);
                params[3] = FW_PARAM_PFVF(RQ_END);
                params[4] = FW_PARAM_PFVF(PBL_START);
                params[5] = FW_PARAM_PFVF(PBL_END);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
-                                     val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+                                     params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.stag.start = val[0];
@@ -3272,8 +4020,7 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(CQ_END);
                params[4] = FW_PARAM_PFVF(OCQ_START);
                params[5] = FW_PARAM_PFVF(OCQ_END);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
-                                     val);
+               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.qp.start = val[0];
@@ -3283,11 +4030,11 @@ static int adap_init0(struct adapter *adap)
                adap->vres.ocq.start = val[4];
                adap->vres.ocq.size = val[5] - val[4] + 1;
        }
-       if (c.iscsicaps) {
+       if (caps_cmd.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
                params[1] = FW_PARAM_PFVF(ISCSI_END);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
-                                     val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+                                     params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.iscsi.start = val[0];
@@ -3295,63 +4042,33 @@ static int adap_init0(struct adapter *adap)
        }
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
+#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
 
-       adap->params.nports = hweight32(port_vec);
-       adap->params.portvec = port_vec;
-       adap->flags |= FW_OK;
-
-       /* These are finalized by FW initialization, load their values now */
+       /*
+        * These are finalized by FW initialization, load their values now.
+        */
        v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
        adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                     adap->params.b_wnd);
 
-#ifdef CONFIG_PCI_IOV
-       /*
-        * Provision resource limits for Virtual Functions.  We currently
-        * grant them all the same static resource limits except for the Port
-        * Access Rights Mask which we're assigning based on the PF.  All of
-        * the static provisioning stuff for both the PF and VF really needs
-        * to be managed in a persistent manner for each device which the
-        * firmware controls.
-        */
-       {
-               int pf, vf;
-
-               for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
-                       if (num_vf[pf] <= 0)
-                               continue;
+       /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+       for (j = 0; j < NCHAN; j++)
+               adap->params.tp.tx_modq[j] = j;
 
-                       /* VF numbering starts at 1! */
-                       for (vf = 1; vf <= num_vf[pf]; vf++) {
-                               ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
-                                                 VFRES_NEQ, VFRES_NETHCTRL,
-                                                 VFRES_NIQFLINT, VFRES_NIQ,
-                                                 VFRES_TC, VFRES_NVI,
-                                                 FW_PFVF_CMD_CMASK_MASK,
-                                                 pfvfres_pmask(adap, pf, vf),
-                                                 VFRES_NEXACTF,
-                                                 VFRES_R_CAPS, VFRES_WX_CAPS);
-                               if (ret < 0)
-                                       dev_warn(adap->pdev_dev, "failed to "
-                                                "provision pf/vf=%d/%d; "
-                                                "err=%d\n", pf, vf, ret);
-                       }
-               }
-       }
-#endif
-
-       setup_memwin(adap);
+       adap->flags |= FW_OK;
        return 0;
 
        /*
-        * If a command timed out or failed with EIO FW does not operate within
-        * its spec or something catastrophic happened to HW/FW, stop issuing
-        * commands.
+        * Something bad happened.  If a command timed out or failed with EIO
+        * FW does not operate within its spec or something catastrophic
+        * happened to HW/FW, stop issuing commands.
         */
-bye:   if (ret != -ETIMEDOUT && ret != -EIO)
-               t4_fw_bye(adap, adap->fn);
+bye:
+       if (ret != -ETIMEDOUT && ret != -EIO)
+               t4_fw_bye(adap, adap->mbox);
        return ret;
 }
 
@@ -3806,7 +4523,9 @@ static int __devinit init_one(struct pci_dev *pdev,
        err = t4_prep_adapter(adapter);
        if (err)
                goto out_unmap_bar;
+       setup_memwin(adapter);
        err = adap_init0(adapter);
+       setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;
 
@@ -3948,8 +4667,11 @@ static void __devexit remove_one(struct pci_dev *pdev)
 {
        struct adapter *adapter = pci_get_drvdata(pdev);
 
+#ifdef CONFIG_PCI_IOV
        pci_disable_sriov(pdev);
 
+#endif
+
        if (adapter) {
                int i;
 
index d79980c5fc630e44396b3b66af7b4a1ded1cf36c..1b899fea1a91427bac31b171cd799fad2fc384f0 100644 (file)
@@ -100,6 +100,8 @@ struct tid_info {
 
        unsigned int nftids;
        unsigned int ftid_base;
+       unsigned int aftid_base;
+       unsigned int aftid_end;
 
        spinlock_t atid_lock ____cacheline_aligned_in_smp;
        union aopen_entry *afree;
index d49933ed551f7a4fca4d6b71f0528aaf9f6a80d6..3ecc087d732d12ea3e61214235f1a99254f87924 100644 (file)
@@ -68,9 +68,6 @@
  */
 #define RX_PKT_SKB_LEN   512
 
-/* Ethernet header padding prepended to RX_PKTs */
-#define RX_PKT_PAD 2
-
 /*
  * Max number of Tx descriptors we clean up at a time.  Should be modest as
  * freeing skbs isn't cheap and it happens while holding locks.  We just need
  */
 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
 
-enum {
-       /* packet alignment in FL buffers */
-       FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
-       /* egress status entry size */
-       STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
-};
-
 struct tx_sw_desc {                /* SW state per Tx descriptor */
        struct sk_buff *skb;
        struct ulptx_sgl *sgl;
@@ -155,16 +145,57 @@ struct rx_sw_desc {                /* SW state per Rx descriptor */
 };
 
 /*
- * The low bits of rx_sw_desc.dma_addr have special meaning.
+ * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
+ * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
+ * We could easily support more but there doesn't seem to be much need for
+ * that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+                                         unsigned int mtu)
+{
+       struct sge *s = &adapter->sge;
+
+       return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array.  We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings.  All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
  */
 enum {
-       RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
-       RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+       RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
+       RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
+       RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
+
+       /*
+        * XXX We shouldn't depend on being able to use these indices.
+        * XXX Especially when some other Master PF has initialized the
+        * XXX adapter or we use the Firmware Configuration File.  We
+        * XXX should really search through the Host Buffer Size register
+        * XXX array for the appropriately sized buffer indices.
+        */
+       RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
+       RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */
+
+       RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
+       RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
 };
 
 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
 {
-       return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+       return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
 }
 
 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
@@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
        }
 }
 
-static inline int get_buf_size(const struct rx_sw_desc *d)
+static inline int get_buf_size(struct adapter *adapter,
+                              const struct rx_sw_desc *d)
 {
-#if FL_PG_ORDER > 0
-       return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
-                                             PAGE_SIZE;
-#else
-       return PAGE_SIZE;
-#endif
+       struct sge *s = &adapter->sge;
+       unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+       int buf_size;
+
+       switch (rx_buf_size_idx) {
+       case RX_SMALL_PG_BUF:
+               buf_size = PAGE_SIZE;
+               break;
+
+       case RX_LARGE_PG_BUF:
+               buf_size = PAGE_SIZE << s->fl_pg_order;
+               break;
+
+       case RX_SMALL_MTU_BUF:
+               buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+               break;
+
+       case RX_LARGE_MTU_BUF:
+               buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+               break;
+
+       default:
+               BUG_ON(1);
+       }
+
+       return buf_size;
 }
 
 /**
@@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
 
                if (is_buf_mapped(d))
                        dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-                                      get_buf_size(d), PCI_DMA_FROMDEVICE);
+                                      get_buf_size(adap, d),
+                                      PCI_DMA_FROMDEVICE);
                put_page(d->page);
                d->page = NULL;
                if (++q->cidx == q->size)
@@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
 
        if (is_buf_mapped(d))
                dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-                              get_buf_size(d), PCI_DMA_FROMDEVICE);
+                              get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
        d->page = NULL;
        if (++q->cidx == q->size)
                q->cidx = 0;
@@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                              gfp_t gfp)
 {
+       struct sge *s = &adap->sge;
        struct page *pg;
        dma_addr_t mapping;
        unsigned int cred = q->avail;
@@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 
        gfp |= __GFP_NOWARN | __GFP_COLD;
 
-#if FL_PG_ORDER > 0
+       if (s->fl_pg_order == 0)
+               goto alloc_small_pages;
+
        /*
         * Prefer large buffers
         */
        while (n) {
-               pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+               pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
                if (unlikely(!pg)) {
                        q->large_alloc_failed++;
                        break;       /* fall back to single pages */
                }
 
                mapping = dma_map_page(adap->pdev_dev, pg, 0,
-                                      PAGE_SIZE << FL_PG_ORDER,
+                                      PAGE_SIZE << s->fl_pg_order,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-                       __free_pages(pg, FL_PG_ORDER);
+                       __free_pages(pg, s->fl_pg_order);
                        goto out;   /* do not try small pages for this error */
                }
-               mapping |= RX_LARGE_BUF;
+               mapping |= RX_LARGE_PG_BUF;
                *d++ = cpu_to_be64(mapping);
 
                set_rx_sw_desc(sd, pg, mapping);
@@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                }
                n--;
        }
-#endif
 
+alloc_small_pages:
        while (n--) {
                pg = __skb_alloc_page(gfp, NULL);
                if (unlikely(!pg)) {
@@ -769,8 +825,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
        wmb();            /* write descriptors before telling HW */
        spin_lock(&q->db_lock);
        if (!q->db_disabled) {
-               t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
-                            V_QID(q->cntxt_id) | V_PIDX(n));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(q->cntxt_id) | PIDX(n));
        }
        q->db_pidx = q->pidx;
        spin_unlock(&q->db_lock);
@@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap,
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                   const struct cpl_rx_pkt *pkt)
 {
+       struct adapter *adapter = rxq->rspq.adap;
+       struct sge *s = &adapter->sge;
        int ret;
        struct sk_buff *skb;
 
@@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                return;
        }
 
-       copy_frags(skb, gl, RX_PKT_PAD);
-       skb->len = gl->tot_len - RX_PKT_PAD;
+       copy_frags(skb, gl, s->pktshift);
+       skb->len = gl->tot_len - s->pktshift;
        skb->data_len = skb->len;
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        struct sk_buff *skb;
        const struct cpl_rx_pkt *pkt;
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+       struct sge *s = &q->adap->sge;
 
        if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
                return handle_trace_pkt(q->adap, si);
@@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                return 0;
        }
 
-       __skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
+       __skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
        skb->protocol = eth_type_trans(skb, q->netdev);
        skb_record_rx_queue(skb, q->idx);
        if (skb->dev->features & NETIF_F_RXHASH)
@@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget)
        int budget_left = budget;
        const struct rsp_ctrl *rc;
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+       struct adapter *adapter = q->adap;
+       struct sge *s = &adapter->sge;
 
        while (likely(budget_left)) {
                rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
@@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget)
                        /* gather packet fragments */
                        for (frags = 0, fp = si.frags; ; frags++, fp++) {
                                rsd = &rxq->fl.sdesc[rxq->fl.cidx];
-                               bufsz = get_buf_size(rsd);
+                               bufsz = get_buf_size(adapter, rsd);
                                fp->page = rsd->page;
                                fp->offset = q->offset;
                                fp->size = min(bufsz, len);
@@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget)
                        si.nfrags = frags + 1;
                        ret = q->handler(q, q->cur_desc, &si);
                        if (likely(ret == 0))
-                               q->offset += ALIGN(fp->size, FL_ALIGN);
+                               q->offset += ALIGN(fp->size, s->fl_align);
                        else
                                restore_rx_bufs(&si, &rxq->fl, frags);
                } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 {
        int ret, flsz = 0;
        struct fw_iq_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Size needs to be multiple of 16, including status entry. */
@@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                fl->size = roundup(fl->size, 8);
                fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
                                      sizeof(struct rx_sw_desc), &fl->addr,
-                                     &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
+                                     &fl->sdesc, s->stat_len, NUMA_NO_NODE);
                if (!fl->desc)
                        goto fl_nomem;
 
-               flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+               flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
                c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
                                            FW_IQ_CMD_FL0FETCHRO(1) |
                                            FW_IQ_CMD_FL0DATARO(1) |
@@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 {
        int ret, nentries;
        struct fw_eq_eth_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Add status entries */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-                       &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+                       &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                        netdev_queue_numa_node_read(netdevq));
        if (!txq->q.desc)
                return -ENOMEM;
@@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 {
        int ret, nentries;
        struct fw_eq_ctrl_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Add status entries */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
                                 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
@@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 {
        int ret, nentries;
        struct fw_eq_ofld_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Add status entries */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-                       &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+                       &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                        NUMA_NO_NODE);
        if (!txq->q.desc)
                return -ENOMEM;
@@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 
 static void free_txq(struct adapter *adap, struct sge_txq *q)
 {
+       struct sge *s = &adap->sge;
+
        dma_free_coherent(adap->pdev_dev,
-                         q->size * sizeof(struct tx_desc) + STAT_LEN,
+                         q->size * sizeof(struct tx_desc) + s->stat_len,
                          q->desc, q->phys_addr);
        q->cntxt_id = 0;
        q->sdesc = NULL;
@@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                         struct sge_fl *fl)
 {
+       struct sge *s = &adap->sge;
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
        adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
@@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 
        if (fl) {
                free_rx_bufs(adap, fl, fl->avail);
-               dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+               dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
                                  fl->desc, fl->addr);
                kfree(fl->sdesc);
                fl->sdesc = NULL;
@@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap)
  *     Performs SGE initialization needed every time after a chip reset.
  *     We do not initialize any of the queues here, instead the driver
  *     top-level must request them individually.
+ *
+ *     Called in two different modes:
+ *
+ *      1. Perform actual hardware initialization and record hard-coded
+ *         parameters which were used.  This gets used when we're the
+ *         Master PF and the Firmware Configuration File support didn't
+ *         work for some reason.
+ *
+ *      2. We're not the Master PF or initialization was performed with
+ *         a Firmware Configuration File.  In this case we need to grab
+ *         any of the SGE operating parameters that we need to have in
+ *         order to do our job and make sure we can live with them ...
  */
-void t4_sge_init(struct adapter *adap)
+
+static int t4_sge_init_soft(struct adapter *adap)
 {
-       unsigned int i, v;
        struct sge *s = &adap->sge;
-       unsigned int fl_align_log = ilog2(FL_ALIGN);
+       u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+       u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+       u32 ingress_rx_threshold;
 
-       t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
-                        INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
-                        INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
-                        RXPKTCPLMODE |
-                        (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+       /*
+        * Verify that CPL messages are going to the Ingress Queue for
+        * process_responses() and that only packet data is going to the
+        * Free Lists.
+        */
+       if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
+           RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+               dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Validate the Host Buffer Register Array indices that we want to
+        * use ...
+        *
+        * XXX Note that we should really read through the Host Buffer Size
+        * XXX register array and find the indices of the Buffer Sizes which
+        * XXX meet our needs!
+        */
+       #define READ_FL_BUF(x) \
+               t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+
+       fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+       fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+       fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+       fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+       #undef READ_FL_BUF
+
+       if (fl_small_pg != PAGE_SIZE ||
+           (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+                                 (fl_large_pg & (fl_large_pg-1)) != 0))) {
+               dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
+                       fl_small_pg, fl_large_pg);
+               return -EINVAL;
+       }
+       if (fl_large_pg)
+               s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+       if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
+           fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+               dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
+                       fl_small_mtu, fl_large_mtu);
+               return -EINVAL;
+       }
+
+       /*
+        * Retrieve our RX interrupt holdoff timer values and counter
+        * threshold values from the SGE parameters.
+        */
+       timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
+       timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
+       timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+       s->timer_val[0] = core_ticks_to_us(adap,
+               TIMERVALUE0_GET(timer_value_0_and_1));
+       s->timer_val[1] = core_ticks_to_us(adap,
+               TIMERVALUE1_GET(timer_value_0_and_1));
+       s->timer_val[2] = core_ticks_to_us(adap,
+               TIMERVALUE2_GET(timer_value_2_and_3));
+       s->timer_val[3] = core_ticks_to_us(adap,
+               TIMERVALUE3_GET(timer_value_2_and_3));
+       s->timer_val[4] = core_ticks_to_us(adap,
+               TIMERVALUE4_GET(timer_value_4_and_5));
+       s->timer_val[5] = core_ticks_to_us(adap,
+               TIMERVALUE5_GET(timer_value_4_and_5));
+
+       ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
+       s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+       s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+       s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+       s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+       return 0;
+}
+
+static int t4_sge_init_hard(struct adapter *adap)
+{
+       struct sge *s = &adap->sge;
+
+       /*
+        * Set up our basic SGE mode to deliver CPL messages to our Ingress
+        * Queue and Packet Data to the Free List.
+        */
+       t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
+                        RXPKTCPLMODE_MASK);
 
        /*
         * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
@@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap)
        t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
                        F_ENABLE_DROP);
 
-       for (i = v = 0; i < 32; i += 4)
-               v |= (PAGE_SHIFT - 10) << i;
-       t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
-#if FL_PG_ORDER > 0
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
-#endif
+       /*
+        * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
+        * t4_fixup_host_params().
+        */
+       s->fl_pg_order = FL_PG_ORDER;
+       if (s->fl_pg_order)
+               t4_write_reg(adap,
+                            SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
+                            PAGE_SIZE << FL_PG_ORDER);
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
+                    FL_MTU_SMALL_BUFSIZE(adap));
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
+                    FL_MTU_LARGE_BUFSIZE(adap));
+
+       /*
+        * Note that the SGE Ingress Packet Count Interrupt Threshold and
+        * Timer Holdoff values must be supplied by our caller.
+        */
        t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
                     THRESHOLD_0(s->counter_val[0]) |
                     THRESHOLD_1(s->counter_val[1]) |
@@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap)
                     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
                     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
        t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
-                    TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
-                    TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+                    TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
+                    TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
        t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
-                    TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
-                    TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+                    TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
+                    TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+
+       return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+       struct sge *s = &adap->sge;
+       u32 sge_control;
+       int ret;
+
+       /*
+        * Ingress Padding Boundary and Egress Status Page Size are set up by
+        * t4_fixup_host_params().
+        */
+       sge_control = t4_read_reg(adap, SGE_CONTROL);
+       s->pktshift = PKTSHIFT_GET(sge_control);
+       s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+       s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
+                           X_INGPADBOUNDARY_SHIFT);
+
+       if (adap->flags & USING_SOFT_PARAMS)
+               ret = t4_sge_init_soft(adap);
+       else
+               ret = t4_sge_init_hard(adap);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * A FL with <= fl_starve_thres buffers is starving and a periodic
+        * timer will attempt to refill it.  This needs to be larger than the
+        * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+        * stuck waiting for new packets while the SGE is waiting for us to
+        * give it more Free List entries.  (Note that the SGE's Egress
+        * Congestion Threshold is in units of 2 Free List pointers.)
+        */
+       s->fl_starve_thres
+               = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
        s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
        s->idma_state[0] = s->idma_state[1] = 0;
        spin_lock_init(&s->intrq_lock);
+
+       return 0;
 }
index af16013231733212e0e2ca3642d9910799bce118..35b81d8b59e90707fdb37518f76300871b5fd627 100644 (file)
@@ -120,6 +120,28 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
        }
 }
 
+/**
+ *     t4_write_indirect - write indirectly addressed registers
+ *     @adap: the adapter
+ *     @addr_reg: register holding the indirect addresses
+ *     @data_reg: register holding the value for the indirect registers
+ *     @vals: values to write
+ *     @nregs: how many indirect registers to write
+ *     @start_idx: address of first indirect register to write
+ *
+ *     Writes a sequential block of registers that are accessed indirectly
+ *     through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+                      unsigned int data_reg, const u32 *vals,
+                      unsigned int nregs, unsigned int start_idx)
+{
+       while (nregs--) {
+               /* select the target indirect register, then write its value */
+               t4_write_reg(adap, addr_reg, start_idx++);
+               t4_write_reg(adap, data_reg, *vals++);
+       }
+}
+
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -330,6 +352,143 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        return 0;
 }
 
+/*
+ *     t4_mem_win_rw - read/write memory through PCIE memory window
+ *     @adap: the adapter
+ *     @addr: address of first byte requested
+ *     @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ *     @dir: direction of transfer 1 => read, 0 => write
+ *
+ *     Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ *     MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ *     address @addr.
+ *
+ *     Always returns 0.
+ */
+static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
+{
+       int i;
+
+       /*
+        * Setup offset into PCIE memory window.  Address must be a
+        * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
+        * ensure that changes propagate before we attempt to use the new
+        * values.)
+        */
+       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
+                    addr & ~(MEMWIN0_APERTURE - 1));
+       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
+
+       /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
+       for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
+               if (dir)
+                       *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+               else
+                       t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+       }
+
+       return 0;
+}
+
+/**
+ *     t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ *     @adap: the adapter
+ *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ *     @addr: address within indicated memory type
+ *     @len: amount of memory to transfer
+ *     @buf: host memory buffer
+ *     @dir: direction of transfer 1 => read, 0 => write
+ *
+ *     Reads/writes an [almost] arbitrary memory region in the firmware: the
+ *     firmware memory address, length and host buffer must be aligned on
+ *     32-bit boundaries.  The memory is transferred as a raw byte sequence
+ *     from/to the firmware's memory.  If this memory contains data
+ *     structures which contain multi-byte integers, it's the caller's
+ *     responsibility to perform appropriate byte order conversions.
+ */
+static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
+                       __be32 *buf, int dir)
+{
+       u32 pos, start, end, offset, memoffset;
+       int ret;
+
+       /*
+        * Argument sanity checks ...
+        */
+       if ((addr & 0x3) || (len & 0x3))
+               return -EINVAL;
+
+       /*
+        * Offset into the region of memory which is being accessed
+        * MEM_EDC0 = 0
+        * MEM_EDC1 = 1
+        * MEM_MC   = 2
+        * (Note: this assumes each memory region is 5MB in size.)
+        */
+       memoffset = (mtype * (5 * 1024 * 1024));
+
+       /* Determine the PCIE_MEM_ACCESS_OFFSET */
+       addr = addr + memoffset;
+
+       /*
+        * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
+        * at a time so we need to round down the start and round up the end.
+        * We'll start copying out of the first line at (addr - start) a word
+        * at a time.
+        */
+       start = addr & ~(MEMWIN0_APERTURE-1);
+       end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
+       offset = (addr - start)/sizeof(__be32);
+
+       for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+               __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
+
+               /*
+                * If we're writing, copy the data from the caller's memory
+                * buffer
+                */
+               if (!dir) {
+                       /*
+                        * If we're doing a partial write, then we need to do
+                        * a read-modify-write ...
+                        */
+                       if (offset || len < MEMWIN0_APERTURE) {
+                               ret = t4_mem_win_rw(adap, pos, data, 1);
+                               if (ret)
+                                       return ret;
+                       }
+                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+                              len > 0) {
+                               data[offset++] = *buf++;
+                               len -= sizeof(__be32);
+                       }
+               }
+
+               /*
+                * Transfer a block of memory and bail if there's an error.
+                */
+               ret = t4_mem_win_rw(adap, pos, data, dir);
+               if (ret)
+                       return ret;
+
+               /*
+                * If we're reading, copy the data into the caller's memory
+                * buffer.
+                */
+               if (dir)
+                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+                              len > 0) {
+                               *buf++ = data[offset++];
+                               len -= sizeof(__be32);
+                       }
+       }
+
+       return 0;
+}
+
+/*
+ * Write @len bytes from @buf to the indicated adapter memory; a thin
+ * wrapper around t4_memory_rw() with dir == 0 (write).
+ */
+int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
+                   __be32 *buf)
+{
+       return t4_memory_rw(adap, mtype, addr, len, buf, 0);
+}
+
 #define EEPROM_STAT_ADDR   0x7bfc
 #define VPD_BASE           0
 #define VPD_LEN            512
@@ -355,8 +514,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
  *
  *     Reads card parameters stored in VPD EEPROM.
  */
-static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
+       u32 cclk_param, cclk_val;
        int i, ret;
        int ec, sn;
        u8 vpd[VPD_LEN], csum;
@@ -418,6 +578,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);
+
+       /*
+        * Ask firmware for the Core Clock since it knows how to translate the
+        * Reference Clock ('V2') VPD field into a Core Clock value ...
+        */
+       cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+                     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+       ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+                             1, &cclk_param, &cclk_val);
+       if (ret)
+               return ret;
+       p->cclk = cclk_val;
+
        return 0;
 }
 
@@ -717,6 +890,77 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
        return ret;
 }
 
+/**
+ *     t4_flash_cfg_addr - return the address of the flash configuration file
+ *     @adapter: the adapter
+ *
+ *     Return the address within the flash where the Firmware Configuration
+ *     File is stored.
+ */
+unsigned int t4_flash_cfg_addr(struct adapter *adapter)
+{
+       /* 1MB serial flash parts keep the config file at the FPGA location */
+       if (adapter->params.sf_size == 0x100000)
+               return FLASH_FPGA_CFG_START;
+       else
+               return FLASH_CFG_START;
+}
+
+/**
+ *     t4_load_cfg - download config file
+ *     @adap: the adapter
+ *     @cfg_data: the cfg text file to write
+ *     @size: text file size
+ *
+ *     Write the supplied config text file to the card's serial flash.
+ *     A @size of 0 simply erases the FLASH sectors which hold the
+ *     on-adapter Firmware Configuration File.
+ */
+int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+       int ret, i, n;
+       unsigned int addr;
+       unsigned int flash_cfg_start_sec;
+       unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+       addr = t4_flash_cfg_addr(adap);
+       flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+       if (size > FLASH_CFG_MAX_SIZE) {
+               dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
+                       FLASH_CFG_MAX_SIZE);
+               return -EFBIG;
+       }
+
+       i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,    /* # of sectors spanned */
+                        sf_sec_size);
+       ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+                                    flash_cfg_start_sec + i - 1);
+       /*
+        * If size == 0 then we're simply erasing the FLASH sectors associated
+        * with the on-adapter Firmware Configuration File.
+        */
+       if (ret || size == 0)
+               goto out;
+
+       /* this will write to the flash up to SF_PAGE_SIZE at a time */
+       for (i = 0; i < size; i += SF_PAGE_SIZE) {
+               if ((size - i) <  SF_PAGE_SIZE)
+                       n = size - i;
+               else
+                       n = SF_PAGE_SIZE;
+               ret = t4_write_flash(adap, addr, n, cfg_data);
+               if (ret)
+                       goto out;
+
+               addr += SF_PAGE_SIZE;
+               cfg_data += SF_PAGE_SIZE;
+       }
+
+out:
+       if (ret)
+               dev_err(adap->pdev_dev, "config file %s failed %d\n",
+                       (size == 0 ? "clear" : "download"), ret);
+       return ret;
+}
+
 /**
  *     t4_load_fw - download firmware
  *     @adap: the adapter
@@ -1018,9 +1262,9 @@ static void sge_intr_handler(struct adapter *adapter)
                { ERR_INVALID_CIDX_INC,
                  "SGE GTS CIDX increment too large", -1, 0 },
                { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-               { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
-               { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
-               { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
+               { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
+               { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
+               { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
                { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
                { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1520,7 +1764,7 @@ void t4_intr_enable(struct adapter *adapter)
                     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
                     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
                     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
-                    F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
+                    DBFIFO_HP_INT | DBFIFO_LP_INT |
                     EGRESS_SIZE_ERR);
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
        t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
@@ -1716,6 +1960,23 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
        }
 }
 
+/**
+ *     t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ *     @adap: the adapter
+ *     @addr: the indirect TP register address
+ *     @mask: specifies the field within the register to modify
+ *     @val: new value for the field
+ *
+ *     Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+                           unsigned int mask, unsigned int val)
+{
+       t4_write_reg(adap, TP_PIO_ADDR, addr);
+       /* read-modify-write: preserve the bits outside @mask */
+       val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
+       t4_write_reg(adap, TP_PIO_DATA, val);
+}
+
 /**
  *     init_cong_ctrl - initialize congestion control parameters
  *     @a: the alpha values for congestion control
@@ -2000,9 +2261,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
        struct fw_ldst_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-                           F_FW_CMD_WRITE |
-                           V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
+       c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+                           FW_CMD_WRITE |
+                           FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
        c.cycles_to_len16 = htonl(FW_LEN16(c));
        c.u.addrval.addr = htonl(addr);
        c.u.addrval.val = htonl(val);
@@ -2033,8 +2294,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
        if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
                return -EINVAL;
 
-       t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
-       t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);
+       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
 
        for (i = 0; i < len; i += 4)
                *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
@@ -2102,39 +2363,129 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 }
 
 /**
- *     t4_fw_hello - establish communication with FW
- *     @adap: the adapter
- *     @mbox: mailbox to use for the FW command
- *     @evt_mbox: mailbox to receive async FW events
- *     @master: specifies the caller's willingness to be the device master
- *     @state: returns the current device state
+ *      t4_fw_hello - establish communication with FW
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @evt_mbox: mailbox to receive async FW events
+ *      @master: specifies the caller's willingness to be the device master
+ *     @state: returns the current device state (if non-NULL)
  *
- *     Issues a command to establish communication with FW.
+ *     Issues a command to establish communication with FW.  Returns either
+ *     an error (negative integer) or the mailbox of the Master PF.
  */
 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
                enum dev_master master, enum dev_state *state)
 {
        int ret;
        struct fw_hello_cmd c;
+       u32 v;
+       unsigned int master_mbox;
+       int retries = FW_CMD_HELLO_RETRIES;
 
+retry:
+       memset(&c, 0, sizeof(c));
        INIT_CMD(c, HELLO, WRITE);
        c.err_to_mbasyncnot = htonl(
                FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
                FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
-               FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
-               FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+               FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+                                     FW_HELLO_CMD_MBMASTER_MASK) |
+               FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+               FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
+               FW_HELLO_CMD_CLEARINIT);
 
+       /*
+        * Issue the HELLO command to the firmware.  If it's not successful
+        * but indicates that we got a "busy" or "timeout" condition, retry
+        * the HELLO until we exhaust our retry limit.
+        */
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-       if (ret == 0 && state) {
-               u32 v = ntohl(c.err_to_mbasyncnot);
-               if (v & FW_HELLO_CMD_INIT)
-                       *state = DEV_STATE_INIT;
-               else if (v & FW_HELLO_CMD_ERR)
+       if (ret < 0) {
+               if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+                       goto retry;
+               return ret;
+       }
+
+       v = ntohl(c.err_to_mbasyncnot);
+       master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
+       if (state) {
+               if (v & FW_HELLO_CMD_ERR)
                        *state = DEV_STATE_ERR;
+               else if (v & FW_HELLO_CMD_INIT)
+                       *state = DEV_STATE_INIT;
                else
                        *state = DEV_STATE_UNINIT;
        }
-       return ret;
+
+       /*
+        * If we're not the Master PF then we need to wait around for the
+        * Master PF Driver to finish setting up the adapter.
+        *
+        * Note that we also do this wait if we're a non-Master-capable PF and
+        * there is no current Master PF; a Master PF may show up momentarily
+        * and we wouldn't want to fail pointlessly.  (This can happen when an
+        * OS loads lots of different drivers rapidly at the same time).  In
+        * this case, the Master PF returned by the firmware will be
+        * FW_PCIE_FW_MASTER_MASK so the test below will work ...
+        */
+       if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
+           master_mbox != mbox) {
+               int waiting = FW_CMD_HELLO_TIMEOUT;
+
+               /*
+                * Wait for the firmware to either indicate an error or
+                * initialized state.  If we see either of these we bail out
+                * and report the issue to the caller.  If we exhaust the
+                * "hello timeout" and we haven't exhausted our retries, try
+                * again.  Otherwise bail with a timeout error.
+                */
+               for (;;) {
+                       u32 pcie_fw;
+
+                       msleep(50);
+                       waiting -= 50;
+
+                       /*
+                        * If neither Error nor Initialized are indicated
+                        * by the firmware keep waiting till we exhaust our
+                        * timeout ... and then retry if we haven't exhausted
+                        * our retries ...
+                        */
+                       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+                       if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
+                               if (waiting <= 0) {
+                                       if (retries-- > 0)
+                                               goto retry;
+
+                                       return -ETIMEDOUT;
+                               }
+                               continue;
+                       }
+
+                       /*
+                        * We either have an Error or Initialized condition
+                        * report errors preferentially.
+                        */
+                       if (state) {
+                               if (pcie_fw & FW_PCIE_FW_ERR)
+                                       *state = DEV_STATE_ERR;
+                               else if (pcie_fw & FW_PCIE_FW_INIT)
+                                       *state = DEV_STATE_INIT;
+                       }
+
+                       /*
+                        * If we arrived before a Master PF was selected and
+                        * there's not a valid Master PF, grab its identity
+                        * for our caller.
+                        */
+                       if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
+                           (pcie_fw & FW_PCIE_FW_MASTER_VLD))
+                               master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
+                       break;
+               }
+       }
+
+       return master_mbox;
 }
 
 /**
@@ -2185,6 +2536,334 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW RESET command (if desired)
+ *     @force: force uP into RESET even if FW RESET command fails
+ *
+ *     Issues a RESET command to firmware (if desired) with a HALT indication
+ *     and then puts the microprocessor into RESET state.  The RESET command
+ *     will only be issued if a legitimate mailbox is provided (mbox <=
+ *     FW_PCIE_FW_MASTER_MASK).
+ *
+ *     This is generally used in order for the host to safely manipulate the
+ *     adapter without fear of conflicting with whatever the firmware might
+ *     be doing.  The only way out of this state is to RESTART the firmware
+ *     ...
+ *
+ *     Returns the result of the firmware RESET command, even when @force
+ *     causes the uP to be put into RESET despite a RESET command failure.
+ */
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+       int ret = 0;
+
+       /*
+        * If a legitimate mailbox is provided, issue a RESET command
+        * with a HALT indication.
+        */
+       if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+               struct fw_reset_cmd c;
+
+               memset(&c, 0, sizeof(c));
+               INIT_CMD(c, RESET, WRITE);
+               c.val = htonl(PIORST | PIORSTMODE);
+               c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
+               ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+       }
+
+       /*
+        * Normally we won't complete the operation if the firmware RESET
+        * command fails but if our caller insists we'll go ahead and put the
+        * uP into RESET.  This can be useful if the firmware is hung or even
+        * missing ...  We'll have to take the risk of putting the uP into
+        * RESET without the cooperation of firmware in that case.
+        *
+        * We also force the firmware's HALT flag to be on in case we bypassed
+        * the firmware RESET command above or we're dealing with old firmware
+        * which doesn't have the HALT capability.  This will serve as a flag
+        * for the incoming firmware to know that it's coming out of a HALT
+        * rather than a RESET ... if it's new enough to understand that ...
+        */
+       if (ret == 0 || force) {
+               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
+               t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
+                                FW_PCIE_FW_HALT);
+       }
+
+       /*
+        * And we always return the result of the firmware RESET command
+        * even when we force the uP into RESET ...
+        */
+       return ret;
+}
+
+/**
+ *     t4_fw_restart - restart the firmware by taking the uP out of RESET
+ *     @adap: the adapter
+ *     @mbox: mailbox through which a firmware RESET command may be issued
+ *     @reset: if we want to do a RESET to restart things
+ *
+ *     Restart firmware previously halted by t4_fw_halt().  On successful
+ *     return the previous PF Master remains as the new PF Master and there
+ *     is no need to issue a new HELLO command, etc.
+ *
+ *     We do this in two ways:
+ *
+ *      1. If we're dealing with newer firmware we'll simply want to take
+ *         the chip's microprocessor out of RESET.  This will cause the
+ *         firmware to start up from its start vector.  And then we'll loop
+ *         until the firmware indicates it's started again (PCIE_FW.HALT
+ *         reset to 0) or we timeout.
+ *
+ *      2. If we're dealing with older firmware then we'll need to RESET
+ *         the chip since older firmware won't recognize the PCIE_FW.HALT
+ *         flag and automatically RESET itself on startup.
+ */
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+       if (reset) {
+               /*
+                * Since we're directing the RESET instead of the firmware
+                * doing it automatically, we need to clear the PCIE_FW.HALT
+                * bit.
+                */
+               t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
+
+               /*
+                * If we've been given a valid mailbox, first try to get the
+                * firmware to do the RESET.  If that works, great and we can
+                * return success.  Otherwise, if we haven't been given a
+                * valid mailbox or the RESET command failed, fall back to
+                * hitting the chip with a hammer.
+                */
+               if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+                       t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+                       msleep(100);
+                       if (t4_fw_reset(adap, mbox,
+                                       PIORST | PIORSTMODE) == 0)
+                               return 0;
+               }
+
+               t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+               msleep(2000);
+       } else {
+               int ms;
+
+               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+               for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+                       if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
+                               return 0;
+                       msleep(100);
+                       ms += 100;
+               }
+               return -ETIMEDOUT;
+       }
+       return 0;
+}
+
+/**
+ *     t4_fw_upgrade - perform all of the steps necessary to upgrade FW
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW RESET command (if desired)
+ *     @fw_data: the firmware image to write
+ *     @size: image size
+ *     @force: force upgrade even if firmware doesn't cooperate
+ *
+ *     Perform all of the steps necessary for upgrading an adapter's
+ *     firmware image.  Normally this requires the cooperation of the
+ *     existing firmware in order to halt all existing activities
+ *     but if an invalid mailbox token is passed in we skip that step
+ *     (though we'll still put the adapter microprocessor into RESET in
+ *     that case).
+ *
+ *     On successful return the new firmware will have been loaded and
+ *     the adapter will have been fully RESET losing all previous setup
+ *     state.  On unsuccessful return the adapter may be completely hosed ...
+ *     positive errno indicates that the adapter is ~probably~ intact, a
+ *     negative errno indicates that things are looking bad ...
+ */
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+                 const u8 *fw_data, unsigned int size, int force)
+{
+       const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+       int reset, ret;
+
+       /* Try to gracefully halt the firmware before loading the new image */
+       ret = t4_fw_halt(adap, mbox, force);
+       if (ret < 0 && !force)
+               return ret;
+
+       ret = t4_load_fw(adap, fw_data, size);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Older versions of the firmware don't understand the new
+        * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+        * restart.  So for newly loaded older firmware we'll have to do the
+        * RESET for it so it starts up on a clean slate.  We can tell if
+        * the newly loaded firmware will handle this right by checking
+        * its header flags to see if it advertises the capability.
+        */
+       reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+       return t4_fw_restart(adap, mbox, reset);
+}
+
+
+/**
+ *     t4_fw_config_file - setup an adapter via a Configuration File
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *     @mtype: the memory type where the Configuration File is located
+ *     @maddr: the memory address where the Configuration File is located
+ *     @finiver: return value for CF [fini] version
+ *     @finicsum: return value for CF [fini] checksum
+ *     @cfcsum: return value for CF computed checksum
+ *
+ *     Issue a command to get the firmware to process the Configuration
+ *     File located at the specified mtype/maddress.  If the Configuration
+ *     File is processed successfully and return value pointers are
+ *     provided, the Configuration File "[fini]" section version and
+ *     checksum values will be returned along with the computed checksum.
+ *     It's up to the caller to decide how it wants to respond to the
+ *     checksums not matching but it is recommended that a prominent warning
+ *     be emitted in order to help people rapidly identify changed or
+ *     corrupted Configuration Files.
+ *
+ *     Also note that it's possible to modify things like "niccaps",
+ *     "toecaps", etc. between processing the Configuration File and telling
+ *     the firmware to use the new configuration.  Callers which want to
+ *     do this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ *     Configuration Files if they want to do this.
+ */
+int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
+                     unsigned int mtype, unsigned int maddr,
+                     u32 *finiver, u32 *finicsum, u32 *cfcsum)
+{
+       struct fw_caps_config_cmd caps_cmd;
+       int ret;
+
+       /*
+        * Tell the firmware to process the indicated Configuration File.
+        * If there are no errors and the caller has provided return value
+        * pointers for the [fini] section version, checksum and computed
+        * checksum, pass those back to the caller.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_READ);
+       caps_cmd.retval_len16 =
+               htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+                     FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+                     FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+                     FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
+       if (ret < 0)
+               return ret;
+
+       if (finiver)
+               *finiver = ntohl(caps_cmd.finiver);
+       if (finicsum)
+               *finicsum = ntohl(caps_cmd.finicsum);
+       if (cfcsum)
+               *cfcsum = ntohl(caps_cmd.cfcsum);
+
+       /*
+        * And now tell the firmware to use the configuration we just loaded.
+        */
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_WRITE);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
+}
+
+/**
+ *     t4_fixup_host_params - fix up host-dependent parameters
+ *     @adap: the adapter
+ *     @page_size: the host's Base Page Size
+ *     @cache_line_size: the host's Cache Line Size
+ *
+ *     Various registers in T4 contain values which are dependent on the
+ *     host's Base Page and Cache Line Sizes.  This function will fix all of
+ *     those registers with the appropriate values as passed in ...
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+                        unsigned int cache_line_size)
+{
+       unsigned int page_shift = fls(page_size) - 1;
+       unsigned int sge_hps = page_shift - 10;
+       unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+       unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+       unsigned int fl_align_log = fls(fl_align) - 1;
+
+       t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
+                    HOSTPAGESIZEPF0(sge_hps) |
+                    HOSTPAGESIZEPF1(sge_hps) |
+                    HOSTPAGESIZEPF2(sge_hps) |
+                    HOSTPAGESIZEPF3(sge_hps) |
+                    HOSTPAGESIZEPF4(sge_hps) |
+                    HOSTPAGESIZEPF5(sge_hps) |
+                    HOSTPAGESIZEPF6(sge_hps) |
+                    HOSTPAGESIZEPF7(sge_hps));
+
+       t4_set_reg_field(adap, SGE_CONTROL,
+                        INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+                        EGRSTATUSPAGESIZE_MASK,
+                        INGPADBOUNDARY(fl_align_log - 5) |
+                        EGRSTATUSPAGESIZE(stat_len != 64));
+
+       /*
+        * Adjust various SGE Free List Host Buffer Sizes.
+        *
+        * This is something of a crock since we're using fixed indices into
+        * the array which are also known by the sge.c code and the T4
+        * Firmware Configuration File.  We need to come up with a much better
+        * approach to managing this array.  For now, the first four entries
+        * are:
+        *
+        *   0: Host Page Size
+        *   1: 64KB
+        *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+        *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+        *
+        * For the single-MTU buffers in unpacked mode we need to include
+        * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+        * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+        * Padding boundary.  All of these are accommodated in the Factory
+        * Default Firmware Configuration File but we need to adjust it for
+        * this host's cache line size.
+        */
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+                    & ~(fl_align-1));
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+                    & ~(fl_align-1));
+
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+
+       return 0;
+}
+
+/**
+ *     t4_fw_initialize - ask FW to initialize the device
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *
+ *     Issues a command to FW to partially initialize the device.  This
+ *     performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+       struct fw_initialize_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       INIT_CMD(c, INITIALIZE, WRITE);
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_query_params - query FW or device parameters
  *     @adap: the adapter
@@ -2835,10 +3514,6 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
                return ret;
        }
 
-       ret = get_vpd_params(adapter, &adapter->params.vpd);
-       if (ret < 0)
-               return ret;
-
        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
 
        /*
@@ -2846,6 +3521,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
         */
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
+       adapter->params.vpd.cclk = 50000;
        return 0;
 }
 
index c26b455f37de54c1075a93e6d100282ef8c6e134..f534ed7e10e9db34b55a5be07f717d93e860a7df 100644 (file)
@@ -58,6 +58,7 @@ enum {
 
 enum {
        SF_PAGE_SIZE = 256,           /* serial flash page size */
+       SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
 };
 
 enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -137,4 +138,83 @@ struct rsp_ctrl {
 #define QINTR_CNT_EN       0x1
 #define QINTR_TIMER_IDX(x) ((x) << 1)
 #define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start)     ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs)  ((nsecs) * SF_SEC_SIZE)
+
+enum {
+       /*
+        * Various Expansion-ROM boot images, etc.
+        */
+       FLASH_EXP_ROM_START_SEC = 0,
+       FLASH_EXP_ROM_NSECS = 6,
+       FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+       FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+       /*
+        * iSCSI Boot Firmware Table (iBFT) and other driver-related
+        * parameters ...
+        */
+       FLASH_IBFT_START_SEC = 6,
+       FLASH_IBFT_NSECS = 1,
+       FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
+       FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+
+       /*
+        * Boot configuration data.
+        */
+       FLASH_BOOTCFG_START_SEC = 7,
+       FLASH_BOOTCFG_NSECS = 1,
+       FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
+       FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+
+       /*
+        * Location of firmware image in FLASH.
+        */
+       FLASH_FW_START_SEC = 8,
+       FLASH_FW_NSECS = 8,
+       FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+       FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+       /*
+        * iSCSI persistent/crash information.
+        */
+       FLASH_ISCSI_CRASH_START_SEC = 29,
+       FLASH_ISCSI_CRASH_NSECS = 1,
+       FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
+       FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+
+       /*
+        * FCoE persistent/crash information.
+        */
+       FLASH_FCOE_CRASH_START_SEC = 30,
+       FLASH_FCOE_CRASH_NSECS = 1,
+       FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
+       FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+
+       /*
+        * Location of Firmware Configuration File in FLASH.  Since the FPGA
+        * "FLASH" is smaller we need to store the Configuration File in a
+        * different location -- which will overlap the end of the firmware
+        * image if firmware ever gets that large ...
+        */
+       FLASH_CFG_START_SEC = 31,
+       FLASH_CFG_NSECS = 1,
+       FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+       FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+       FLASH_FPGA_CFG_START_SEC = 15,
+       FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
+
+       /*
+        * Sectors 32-63 are reserved for FLASH failover.
+        */
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
 #endif /* __T4_HW_H */
index 111fc323f155c4dbab831ac1fab85e2228a3bb6a..a1a8b57200f607971f8f450495a8b7abe9478d5c 100644 (file)
 #define  CIDXINC_SHIFT     0
 #define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
 
+#define X_RXPKTCPLMODE_SPLIT     1
+#define X_INGPADBOUNDARY_SHIFT 5
+
 #define SGE_CONTROL 0x1008
 #define  DCASYSTYPE             0x00080000U
-#define  RXPKTCPLMODE           0x00040000U
-#define  EGRSTATUSPAGESIZE      0x00020000U
+#define  RXPKTCPLMODE_MASK      0x00040000U
+#define  RXPKTCPLMODE_SHIFT     18
+#define  RXPKTCPLMODE(x)        ((x) << RXPKTCPLMODE_SHIFT)
+#define  EGRSTATUSPAGESIZE_MASK  0x00020000U
+#define  EGRSTATUSPAGESIZE_SHIFT 17
+#define  EGRSTATUSPAGESIZE(x)    ((x) << EGRSTATUSPAGESIZE_SHIFT)
 #define  PKTSHIFT_MASK          0x00001c00U
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
 #define  GLOBALENABLE           0x00000001U
 
 #define SGE_HOST_PAGE_SIZE 0x100c
+
+#define  HOSTPAGESIZEPF7_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF7_SHIFT  28
+#define  HOSTPAGESIZEPF7(x)     ((x) << HOSTPAGESIZEPF7_SHIFT)
+
+#define  HOSTPAGESIZEPF6_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF6_SHIFT  24
+#define  HOSTPAGESIZEPF6(x)     ((x) << HOSTPAGESIZEPF6_SHIFT)
+
+#define  HOSTPAGESIZEPF5_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF5_SHIFT  20
+#define  HOSTPAGESIZEPF5(x)     ((x) << HOSTPAGESIZEPF5_SHIFT)
+
+#define  HOSTPAGESIZEPF4_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF4_SHIFT  16
+#define  HOSTPAGESIZEPF4(x)     ((x) << HOSTPAGESIZEPF4_SHIFT)
+
+#define  HOSTPAGESIZEPF3_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF3_SHIFT  12
+#define  HOSTPAGESIZEPF3(x)     ((x) << HOSTPAGESIZEPF3_SHIFT)
+
+#define  HOSTPAGESIZEPF2_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF2_SHIFT  8
+#define  HOSTPAGESIZEPF2(x)     ((x) << HOSTPAGESIZEPF2_SHIFT)
+
+#define  HOSTPAGESIZEPF1_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF1_SHIFT  4
+#define  HOSTPAGESIZEPF1(x)     ((x) << HOSTPAGESIZEPF1_SHIFT)
+
 #define  HOSTPAGESIZEPF0_MASK   0x0000000fU
 #define  HOSTPAGESIZEPF0_SHIFT  0
 #define  HOSTPAGESIZEPF0(x)     ((x) << HOSTPAGESIZEPF0_SHIFT)
 #define SGE_INT_ENABLE3 0x1040
 #define SGE_FL_BUFFER_SIZE0 0x1044
 #define SGE_FL_BUFFER_SIZE1 0x1048
+#define SGE_FL_BUFFER_SIZE2 0x104c
+#define SGE_FL_BUFFER_SIZE3 0x1050
 #define SGE_INGRESS_RX_THRESHOLD 0x10a0
 #define  THRESHOLD_0_MASK   0x3f000000U
 #define  THRESHOLD_0_SHIFT  24
 #define  THRESHOLD_3(x)     ((x) << THRESHOLD_3_SHIFT)
 #define  THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
 
+#define SGE_CONM_CTRL 0x1094
+#define  EGRTHRESHOLD_MASK   0x00003f00U
+#define  EGRTHRESHOLDshift   8
+#define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
+#define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+
 #define SGE_TIMER_VALUE_0_AND_1 0x10b8
 #define  TIMERVALUE0_MASK   0xffff0000U
 #define  TIMERVALUE0_SHIFT  16
 #define  TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
 
 #define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define  TIMERVALUE2_MASK   0xffff0000U
+#define  TIMERVALUE2_SHIFT  16
+#define  TIMERVALUE2(x)     ((x) << TIMERVALUE2_SHIFT)
+#define  TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
+#define  TIMERVALUE3_MASK   0x0000ffffU
+#define  TIMERVALUE3_SHIFT  0
+#define  TIMERVALUE3(x)     ((x) << TIMERVALUE3_SHIFT)
+#define  TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
+
 #define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define  TIMERVALUE4_MASK   0xffff0000U
+#define  TIMERVALUE4_SHIFT  16
+#define  TIMERVALUE4(x)     ((x) << TIMERVALUE4_SHIFT)
+#define  TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
+#define  TIMERVALUE5_MASK   0x0000ffffU
+#define  TIMERVALUE5_SHIFT  0
+#define  TIMERVALUE5(x)     ((x) << TIMERVALUE5_SHIFT)
+#define  TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
+
 #define SGE_DEBUG_INDEX 0x10cc
 #define SGE_DEBUG_DATA_HIGH 0x10d0
 #define SGE_DEBUG_DATA_LOW 0x10d4
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
 
-#define S_LP_INT_THRESH    12
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
 #define S_HP_INT_THRESH    28
+#define M_HP_INT_THRESH 0xfU
 #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define M_HP_COUNT 0x7ffU
+#define S_HP_COUNT 16
+#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
+#define S_LP_INT_THRESH    12
+#define M_LP_INT_THRESH 0xfU
+#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
+#define M_LP_COUNT 0x7ffU
+#define S_LP_COUNT 0
+#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
 #define A_SGE_DBFIFO_STATUS 0x10a4
 
 #define S_ENABLE_DROP    13
 #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
 #define F_ENABLE_DROP    V_ENABLE_DROP(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define A_SGE_PF_KDOORBELL 0x0
-
-#define S_QID 15
-#define V_QID(x) ((x) << S_QID)
-
-#define S_PIDX 0
-#define V_PIDX(x) ((x) << S_PIDX)
-
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-
-#define A_SGE_INT_ENABLE3 0x1040
-
-#define S_DBFIFO_HP_INT 8
-#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
-#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
-
-#define S_DBFIFO_LP_INT 7
-#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
-#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
-
 #define S_DROPPED_DB 0
 #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
 #define F_DROPPED_DB V_DROPPED_DB(1U)
+#define A_SGE_DOORBELL_CONTROL 0x10a8
 
-#define S_ERR_DROPPED_DB 18
-#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
-#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
-
-#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
-
-#define M_HP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH 0xfU
+#define A_SGE_CTXT_CMD 0x11fc
+#define A_SGE_DBQ_CTXT_BADDR 0x1084
 
 #define PCIE_PF_CLI 0x44
 #define PCIE_INT_CAUSE 0x3004
 #define  WINDOW(x)       ((x) << WINDOW_SHIFT)
 #define PCIE_MEM_ACCESS_OFFSET 0x306c
 
+#define PCIE_FW 0x30b8
+
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
 #define  RNPP 0x80000000U
 #define  RPCP 0x20000000U
 #define  MEM_WRAP_CLIENT_NUM_MASK   0x0000000fU
 #define  MEM_WRAP_CLIENT_NUM_SHIFT  0
 #define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-
+#define MA_PCIE_FW 0x30b8
 #define MA_PARITY_ERROR_STATUS 0x77f4
 
 #define EDC_0_BASE_ADDR 0x7900
 
 #define CIM_BOOT_CFG 0x7b00
 #define  BOOTADDR_MASK 0xffffff00U
+#define  UPCRST        0x1U
 
 #define CIM_PF_MAILBOX_DATA 0x240
 #define CIM_PF_MAILBOX_CTRL 0x280
 #define  VLANEXTENABLE_MASK  0x0000f000U
 #define  VLANEXTENABLE_SHIFT 12
 
+#define TP_GLOBAL_CONFIG 0x7d08
+#define  FIVETUPLELOOKUP_SHIFT  17
+#define  FIVETUPLELOOKUP_MASK   0x00060000U
+#define  FIVETUPLELOOKUP(x)     ((x) << FIVETUPLELOOKUP_SHIFT)
+#define  FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
+                               FIVETUPLELOOKUP_SHIFT)
+
 #define TP_PARA_REG2 0x7d68
 #define  MAXRXDATA_MASK    0xffff0000U
 #define  MAXRXDATA_SHIFT   16
 #define  TIMERRESOLUTION_MASK   0x00ff0000U
 #define  TIMERRESOLUTION_SHIFT  16
 #define  TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
+#define  DELAYEDACKRESOLUTION_MASK 0x000000ffU
+#define  DELAYEDACKRESOLUTION_SHIFT     0
+#define  DELAYEDACKRESOLUTION_GET(x) \
+       (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
 
 #define TP_SHIFT_CNT 0x7dc0
+#define  SYNSHIFTMAX_SHIFT         24
+#define  SYNSHIFTMAX_MASK          0xff000000U
+#define  SYNSHIFTMAX(x)            ((x) << SYNSHIFTMAX_SHIFT)
+#define  SYNSHIFTMAX_GET(x)        (((x) & SYNSHIFTMAX_MASK) >> \
+                                  SYNSHIFTMAX_SHIFT)
+#define  RXTSHIFTMAXR1_SHIFT       20
+#define  RXTSHIFTMAXR1_MASK        0x00f00000U
+#define  RXTSHIFTMAXR1(x)          ((x) << RXTSHIFTMAXR1_SHIFT)
+#define  RXTSHIFTMAXR1_GET(x)      (((x) & RXTSHIFTMAXR1_MASK) >> \
+                                  RXTSHIFTMAXR1_SHIFT)
+#define  RXTSHIFTMAXR2_SHIFT       16
+#define  RXTSHIFTMAXR2_MASK        0x000f0000U
+#define  RXTSHIFTMAXR2(x)          ((x) << RXTSHIFTMAXR2_SHIFT)
+#define  RXTSHIFTMAXR2_GET(x)      (((x) & RXTSHIFTMAXR2_MASK) >> \
+                                  RXTSHIFTMAXR2_SHIFT)
+#define  PERSHIFTBACKOFFMAX_SHIFT  12
+#define  PERSHIFTBACKOFFMAX_MASK   0x0000f000U
+#define  PERSHIFTBACKOFFMAX(x)     ((x) << PERSHIFTBACKOFFMAX_SHIFT)
+#define  PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
+                                  PERSHIFTBACKOFFMAX_SHIFT)
+#define  PERSHIFTMAX_SHIFT         8
+#define  PERSHIFTMAX_MASK          0x00000f00U
+#define  PERSHIFTMAX(x)            ((x) << PERSHIFTMAX_SHIFT)
+#define  PERSHIFTMAX_GET(x)        (((x) & PERSHIFTMAX_MASK) >> \
+                                  PERSHIFTMAX_SHIFT)
+#define  KEEPALIVEMAXR1_SHIFT      4
+#define  KEEPALIVEMAXR1_MASK       0x000000f0U
+#define  KEEPALIVEMAXR1(x)         ((x) << KEEPALIVEMAXR1_SHIFT)
+#define  KEEPALIVEMAXR1_GET(x)     (((x) & KEEPALIVEMAXR1_MASK) >> \
+                                  KEEPALIVEMAXR1_SHIFT)
+#define KEEPALIVEMAXR2_SHIFT       0
+#define KEEPALIVEMAXR2_MASK        0x0000000fU
+#define KEEPALIVEMAXR2(x)          ((x) << KEEPALIVEMAXR2_SHIFT)
+#define KEEPALIVEMAXR2_GET(x)      (((x) & KEEPALIVEMAXR2_MASK) >> \
+                                  KEEPALIVEMAXR2_SHIFT)
 
 #define TP_CCTRL_TABLE 0x7ddc
 #define TP_MTU_TABLE 0x7de4
 #define TP_INT_CAUSE 0x7e74
 #define  FLMTXFLSTEMPTY 0x40000000U
 
+#define TP_VLAN_PRI_MAP 0x140
+#define  FRAGMENTATION_SHIFT 9
+#define  FRAGMENTATION_MASK  0x00000200U
+#define  MPSHITTYPE_MASK     0x00000100U
+#define  MACMATCH_MASK       0x00000080U
+#define  ETHERTYPE_MASK      0x00000040U
+#define  PROTOCOL_MASK       0x00000020U
+#define  TOS_MASK            0x00000010U
+#define  VLAN_MASK           0x00000008U
+#define  VNIC_ID_MASK        0x00000004U
+#define  PORT_MASK           0x00000002U
+#define  FCOE_SHIFT          0
+#define  FCOE_MASK           0x00000001U
+
 #define TP_INGRESS_CONFIG 0x141
 #define  VNIC                0x00000800U
 #define  CSUM_HAS_PSEUDO_HDR 0x00000400U
index ad53f796b574ca6668d8f2af4658aa222cc1b50d..a6364632b490a7d1a7e57bbb086c74afcbe3181a 100644 (file)
@@ -79,6 +79,8 @@ struct fw_wr_hdr {
 #define FW_WR_FLOWID(x)        ((x) << 8)
 #define FW_WR_LEN16(x) ((x) << 0)
 
+#define HW_TPL_FR_MT_PR_IV_P_FC         0X32B
+
 struct fw_ulptx_wr {
        __be32 op_to_compl;
        __be32 flowid_len16;
@@ -155,6 +157,17 @@ struct fw_eth_tx_pkt_vm_wr {
 
 #define FW_CMD_MAX_TIMEOUT 3000
 
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands.  Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT   (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES   3
+
+
 enum fw_cmd_opcodes {
        FW_LDST_CMD                    = 0x01,
        FW_RESET_CMD                   = 0x03,
@@ -304,7 +317,17 @@ struct fw_reset_cmd {
        __be32 op_to_write;
        __be32 retval_len16;
        __be32 val;
-       __be32 r3;
+       __be32 halt_pkd;
+};
+
+#define FW_RESET_CMD_HALT_SHIFT    31
+#define FW_RESET_CMD_HALT_MASK     0x1
+#define FW_RESET_CMD_HALT(x)       ((x) << FW_RESET_CMD_HALT_SHIFT)
+#define FW_RESET_CMD_HALT_GET(x)  \
+       (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
+
+enum fw_hellow_cmd {
+       fw_hello_cmd_stage_os           = 0x0
 };
 
 struct fw_hello_cmd {
@@ -315,8 +338,14 @@ struct fw_hello_cmd {
 #define FW_HELLO_CMD_INIT          (1U << 30)
 #define FW_HELLO_CMD_MASTERDIS(x)   ((x) << 29)
 #define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
-#define FW_HELLO_CMD_MBMASTER(x)    ((x) << 24)
+#define FW_HELLO_CMD_MBMASTER_MASK   0xfU
+#define FW_HELLO_CMD_MBMASTER_SHIFT  24
+#define FW_HELLO_CMD_MBMASTER(x)     ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
+#define FW_HELLO_CMD_MBMASTER_GET(x) \
+       (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
 #define FW_HELLO_CMD_MBASYNCNOT(x)  ((x) << 20)
+#define FW_HELLO_CMD_STAGE(x)       ((x) << 17)
+#define FW_HELLO_CMD_CLEARINIT      (1U << 16)
        __be32 fwrev;
 };
 
@@ -401,6 +430,14 @@ enum fw_caps_config_fcoe {
        FW_CAPS_CONFIG_FCOE_TARGET      = 0x00000002,
 };
 
+enum fw_memtype_cf {
+       FW_MEMTYPE_CF_EDC0              = 0x0,
+       FW_MEMTYPE_CF_EDC1              = 0x1,
+       FW_MEMTYPE_CF_EXTMEM            = 0x2,
+       FW_MEMTYPE_CF_FLASH             = 0x4,
+       FW_MEMTYPE_CF_INTERNAL          = 0x5,
+};
+
 struct fw_caps_config_cmd {
        __be32 op_to_write;
        __be32 retval_len16;
@@ -416,10 +453,15 @@ struct fw_caps_config_cmd {
        __be16 r4;
        __be16 iscsicaps;
        __be16 fcoecaps;
-       __be32 r5;
-       __be64 r6;
+       __be32 cfcsum;
+       __be32 finiver;
+       __be32 finicsum;
 };
 
+#define FW_CAPS_CONFIG_CMD_CFVALID          (1U << 27)
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x)    ((x) << 24)
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
+
 /*
  * params command mnemonics
  */
@@ -451,6 +493,7 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+       FW_PARAMS_PARAM_DEV_CF = 0x0D,
 };
 
 /*
@@ -492,6 +535,8 @@ enum fw_params_param_pfvf {
        FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
        FW_PARAMS_PARAM_PFVF_EQ_START   = 0x2B,
        FW_PARAMS_PARAM_PFVF_EQ_END     = 0x2C,
+       FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
+       FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
 };
 
 /*
@@ -507,8 +552,16 @@ enum fw_params_param_dmaq {
 
 #define FW_PARAMS_MNEM(x)      ((x) << 24)
 #define FW_PARAMS_PARAM_X(x)   ((x) << 16)
-#define FW_PARAMS_PARAM_Y(x)   ((x) << 8)
-#define FW_PARAMS_PARAM_Z(x)   ((x) << 0)
+#define FW_PARAMS_PARAM_Y_SHIFT  8
+#define FW_PARAMS_PARAM_Y_MASK   0xffU
+#define FW_PARAMS_PARAM_Y(x)     ((x) << FW_PARAMS_PARAM_Y_SHIFT)
+#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
+               FW_PARAMS_PARAM_Y_MASK)
+#define FW_PARAMS_PARAM_Z_SHIFT  0
+#define FW_PARAMS_PARAM_Z_MASK   0xffu
+#define FW_PARAMS_PARAM_Z(x)     ((x) << FW_PARAMS_PARAM_Z_SHIFT)
+#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
+               FW_PARAMS_PARAM_Z_MASK)
 #define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
 #define FW_PARAMS_PARAM_YZ(x)  ((x) << 0)
 
@@ -1599,6 +1652,16 @@ struct fw_debug_cmd {
        } u;
 };
 
+#define FW_PCIE_FW_ERR           (1U << 31)
+#define FW_PCIE_FW_INIT          (1U << 30)
+#define FW_PCIE_FW_HALT          (1U << 29)
+#define FW_PCIE_FW_MASTER_VLD    (1U << 15)
+#define FW_PCIE_FW_MASTER_MASK   0x7
+#define FW_PCIE_FW_MASTER_SHIFT  12
+#define FW_PCIE_FW_MASTER(x)     ((x) << FW_PCIE_FW_MASTER_SHIFT)
+#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
+                                FW_PCIE_FW_MASTER_MASK)
+
 struct fw_hdr {
        u8 ver;
        u8 reserved1;
@@ -1613,7 +1676,11 @@ struct fw_hdr {
        u8 intfver_iscsi;
        u8 intfver_fcoe;
        u8 reserved2;
-       __be32  reserved3[27];
+       __u32   reserved3;
+       __u32   reserved4;
+       __u32   reserved5;
+       __be32  flags;
+       __be32  reserved6[23];
 };
 
 #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
@@ -1621,18 +1688,8 @@ struct fw_hdr {
 #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
 #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
 
-#define S_FW_CMD_OP 24
-#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
-
-#define S_FW_CMD_REQUEST 23
-#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
-#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
-
-#define S_FW_CMD_WRITE 21
-#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
-#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
-
-#define S_FW_LDST_CMD_ADDRSPACE 0
-#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+enum fw_hdr_flags {
+       FW_HDR_FLAGS_RESET_HALT = 0x00000001,
+};
 
 #endif /* _T4FW_INTERFACE_H_ */
index 8877fbfefb639ed5e7d6282981dd6f722dd6f5f5..f16745f4b36bf2b2c30bbe8740029aa4327b5020 100644 (file)
@@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter)
                        fl0, fl1);
                return -EINVAL;
        }
-       if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+       if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
                dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter)
         */
        if (fl1)
                FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-       STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+       STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+                   ? 128 : 64);
        PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
        FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
                         SGE_INGPADBOUNDARY_SHIFT);
index d266c86a53f71245a3d344c2169696949f9afcb0..cf4c05bdf5fe71262abf8fd4ac10e11e0cb0240a 100644 (file)
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MAX_RX_POST            BE_NAPI_WEIGHT /* Frags posted at a time */
 #define RX_FRAGS_REFILL_WM     (RX_Q_LEN - MAX_RX_POST)
 
+#define MAX_VFS                        30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN             32
 
 struct be_dma_mem {
@@ -336,7 +337,6 @@ struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        int link_speed;
-       int forced_port_speed;
        u32 dac_cable_len;
        u32 advertising;
        u32 supported;
index 8c63d06ab12b6ccf899fae8fa13f75904beb22c1..af60bb26e33023ac523b672f76e68dcbdd172f92 100644 (file)
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
-                                "opcode %d-%d is not permitted\n",
+                                "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -165,14 +165,13 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
        }
 }
 
-/* Grp5 QOS Speed evt */
+/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
                struct be_async_event_grp5_qos_link_speed *evt)
 {
-       if (evt->physical_port == adapter->port_num) {
-               /* qos_link_speed is in units of 10 Mbps */
-               adapter->phy.link_speed = evt->qos_link_speed * 10;
-       }
+       if (adapter->phy.link_speed >= 0 &&
+           evt->physical_port == adapter->port_num)
+               adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
 }
 
 /*Grp5 PVID evt*/
@@ -717,7 +716,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 
 /* Use MCC */
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle, u32 pmac_id)
+                         bool permanent, u32 if_handle, u32 pmac_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
@@ -734,7 +733,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
-       req->type = type;
+       req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
        } else {
@@ -1326,9 +1325,28 @@ err:
        return status;
 }
 
-/* Uses synchronous mcc */
-int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
-                            u16 *link_speed, u8 *link_status, u32 dom)
+static int be_mac_to_link_speed(int mac_speed)
+{
+       switch (mac_speed) {
+       case PHY_LINK_SPEED_ZERO:
+               return 0;
+       case PHY_LINK_SPEED_10MBPS:
+               return 10;
+       case PHY_LINK_SPEED_100MBPS:
+               return 100;
+       case PHY_LINK_SPEED_1GBPS:
+               return 1000;
+       case PHY_LINK_SPEED_10GBPS:
+               return 10000;
+       }
+       return 0;
+}
+
+/* Uses synchronous mcc
+ * Returns link_speed in Mbps
+ */
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+                            u8 *link_status, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
@@ -1357,11 +1375,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
-               if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
-                       if (link_speed)
-                               *link_speed = le16_to_cpu(resp->link_speed);
-                       if (mac_speed)
-                               *mac_speed = resp->mac_speed;
+               if (link_speed) {
+                       *link_speed = resp->link_speed ?
+                                     le16_to_cpu(resp->link_speed) * 10 :
+                                     be_mac_to_link_speed(resp->mac_speed);
+
+                       if (!resp->logical_link_status)
+                               *link_speed = 0;
                }
                if (link_status)
                        *link_status = resp->logical_link_status;
@@ -2405,6 +2425,9 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                                        CAPABILITY_BE3_NATIVE_ERX_API;
+               if (!adapter->be3_native)
+                       dev_warn(&adapter->pdev->dev,
+                                "adapter not in advanced mode\n");
        }
 err:
        mutex_unlock(&adapter->mbox_lock);
index 250f19b5f7b6c3887282f28af82dce11bfc8fd53..0936e21e3cff3d6cdf3767dbf1d0e708744f9b26 100644 (file)
@@ -1687,7 +1687,7 @@ struct be_cmd_req_set_ext_fat_caps {
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_fw_wait_ready(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle, u32 pmac_id);
+                                bool permanent, u32 if_handle, u32 pmac_id);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                        u32 if_id, u32 *pmac_id, u32 domain);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
@@ -1714,8 +1714,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                        int type);
 extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
                        struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
-                                   u16 *link_speed, u8 *link_status, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+                                   u8 *link_status, u32 dom);
 extern int be_cmd_reset(struct be_adapter *adapter);
 extern int be_cmd_get_stats(struct be_adapter *adapter,
                        struct be_dma_mem *nonemb_cmd);
index c0e700653f965ef204ba38286b33dae76289d897..8e6fb0ba6aa9631132686566859a8228c0c1fb86 100644 (file)
@@ -512,28 +512,6 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
        return val;
 }
 
-static int convert_to_et_speed(u32 be_speed)
-{
-       int et_speed = SPEED_10000;
-
-       switch (be_speed) {
-       case PHY_LINK_SPEED_10MBPS:
-               et_speed = SPEED_10;
-               break;
-       case PHY_LINK_SPEED_100MBPS:
-               et_speed = SPEED_100;
-               break;
-       case PHY_LINK_SPEED_1GBPS:
-               et_speed = SPEED_1000;
-               break;
-       case PHY_LINK_SPEED_10GBPS:
-               et_speed = SPEED_10000;
-               break;
-       }
-
-       return et_speed;
-}
-
 bool be_pause_supported(struct be_adapter *adapter)
 {
        return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
@@ -544,27 +522,16 @@ bool be_pause_supported(struct be_adapter *adapter)
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       u8 port_speed = 0;
-       u16 link_speed = 0;
        u8 link_status;
-       u32 et_speed = 0;
+       u16 link_speed = 0;
        int status;
 
-       if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
-               if (adapter->phy.forced_port_speed < 0) {
-                       status = be_cmd_link_status_query(adapter, &port_speed,
-                                               &link_speed, &link_status, 0);
-                       if (!status)
-                               be_link_status_update(adapter, link_status);
-                       if (link_speed)
-                               et_speed = link_speed * 10;
-                       else if (link_status)
-                               et_speed = convert_to_et_speed(port_speed);
-               } else {
-                       et_speed = adapter->phy.forced_port_speed;
-               }
-
-               ethtool_cmd_speed_set(ecmd, et_speed);
+       if (adapter->phy.link_speed < 0) {
+               status = be_cmd_link_status_query(adapter, &link_speed,
+                                                 &link_status, 0);
+               if (!status)
+                       be_link_status_update(adapter, link_status);
+               ethtool_cmd_speed_set(ecmd, link_speed);
 
                status = be_cmd_get_phy_info(adapter);
                if (status)
@@ -773,8 +740,8 @@ static void
 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       u8 mac_speed = 0;
-       u16 qos_link_speed = 0;
+       int status;
+       u8 link_status = 0;
 
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
@@ -798,11 +765,11 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
                test->flags |= ETH_TEST_FL_FAILED;
        }
 
-       if (be_cmd_link_status_query(adapter, &mac_speed,
-                                    &qos_link_speed, NULL, 0) != 0) {
+       status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
+       if (status) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = -1;
-       } else if (!mac_speed) {
+       } else if (!link_status) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = 1;
        }
index 95d10472f236c2b98f6a107cb0b7ddb0e4b975d4..eb3f2cb3b93bbef296948f8479ab229783c01a94 100644 (file)
@@ -20,6 +20,7 @@
 #include "be.h"
 #include "be_cmds.h"
 #include <asm/div64.h>
+#include <linux/aer.h>
 
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -240,9 +241,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       status = be_cmd_mac_addr_query(adapter, current_mac,
-                               MAC_ADDRESS_TYPE_NETWORK, false,
-                               adapter->if_handle, 0);
+       status = be_cmd_mac_addr_query(adapter, current_mac, false,
+                                      adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -1075,7 +1075,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
 {
        struct pci_dev *dev, *pdev = adapter->pdev;
-       int vfs = 0, assigned_vfs = 0, pos, vf_fn;
+       int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -1086,9 +1086,7 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
 
        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
-               vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
-               if (dev->is_virtfn && dev->devfn == vf_fn &&
-                       dev->bus->number == pdev->bus->number) {
+               if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
@@ -1896,6 +1894,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
                        return status;
        }
 
+       dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
+                adapter->num_tx_qs);
        return 0;
 }
 
@@ -1946,10 +1946,9 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
                        return rc;
        }
 
-       if (adapter->num_rx_qs != MAX_RX_QS)
-               dev_info(&adapter->pdev->dev,
-                       "Created only %d receive queues\n", adapter->num_rx_qs);
-
+       dev_info(&adapter->pdev->dev,
+                "created %d RSS queue(s) and 1 default RX queue\n",
+                adapter->num_rx_qs - 1);
        return 0;
 }
 
@@ -2176,8 +2175,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 {
        u32 num = 0;
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-            !sriov_want(adapter) && be_physfn(adapter) &&
-            !be_is_mc(adapter)) {
+            !sriov_want(adapter) && be_physfn(adapter)) {
                num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
                num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
        }
@@ -2188,6 +2186,7 @@ static void be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS            1
        int i, status, num_vec, num_roce_vec = 0;
+       struct device *dev = &adapter->pdev->dev;
 
        /* If RSS queues are not used, need a vec for default RX Q */
        num_vec = min(be_num_rss_want(adapter), num_online_cpus());
@@ -2212,6 +2211,8 @@ static void be_msix_enable(struct be_adapter *adapter)
                                num_vec) == 0)
                        goto done;
        }
+
+       dev_warn(dev, "MSIx enable failed\n");
        return;
 done:
        if (be_roce_supported(adapter)) {
@@ -2225,6 +2226,7 @@ done:
                }
        } else
                adapter->num_msix_vec = num_vec;
+       dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
        return;
 }
 
@@ -2441,8 +2443,7 @@ static int be_open(struct net_device *netdev)
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }
 
-       status = be_cmd_link_status_query(adapter, NULL, NULL,
-                                         &link_status, 0);
+       status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);
 
@@ -2646,8 +2647,8 @@ static int be_vf_setup(struct be_adapter *adapter)
        }
 
        for_all_vfs(adapter, vf_cfg, vf) {
-               status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-                                                 NULL, vf + 1);
+               lnk_speed = 1000;
+               status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
                if (status)
                        goto err;
                vf_cfg->tx_rate = lnk_speed * 10;
@@ -2671,7 +2672,6 @@ static void be_setup_init(struct be_adapter *adapter)
        adapter->be3_native = false;
        adapter->promiscuous = false;
        adapter->eq_next_idx = 0;
-       adapter->phy.forced_port_speed = -1;
 }
 
 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2693,21 +2693,16 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
                status = be_cmd_get_mac_from_list(adapter, mac,
                                                  active_mac, pmac_id, 0);
                if (*active_mac) {
-                       status = be_cmd_mac_addr_query(adapter, mac,
-                                                      MAC_ADDRESS_TYPE_NETWORK,
-                                                      false, if_handle,
-                                                      *pmac_id);
+                       status = be_cmd_mac_addr_query(adapter, mac, false,
+                                                      if_handle, *pmac_id);
                }
        } else if (be_physfn(adapter)) {
                /* For BE3, for PF get permanent MAC */
-               status = be_cmd_mac_addr_query(adapter, mac,
-                                              MAC_ADDRESS_TYPE_NETWORK, true,
-                                              0, 0);
+               status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
                *active_mac = false;
        } else {
                /* For BE3, for VF get soft MAC assigned by PF*/
-               status = be_cmd_mac_addr_query(adapter, mac,
-                                              MAC_ADDRESS_TYPE_NETWORK, false,
+               status = be_cmd_mac_addr_query(adapter, mac, false,
                                               if_handle, 0);
                *active_mac = true;
        }
@@ -2724,6 +2719,8 @@ static int be_get_config(struct be_adapter *adapter)
        if (pos) {
                pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
                                     &dev_num_vfs);
+               if (!lancer_chip(adapter))
+                       dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
                adapter->dev_num_vfs = dev_num_vfs;
        }
        return 0;
@@ -3437,6 +3434,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
+       kfree(adapter->pmac_id);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3471,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
        }
        memset(rx_filter->va, 0, rx_filter->size);
 
+       /* primary mac needs 1 pmac entry */
+       adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+                                  sizeof(*adapter->pmac_id), GFP_KERNEL);
+       if (!adapter->pmac_id)
+               return -ENOMEM;
+
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
@@ -3543,6 +3547,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
 
        be_ctrl_cleanup(adapter);
 
+       pci_disable_pcie_error_reporting(pdev);
+
        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
@@ -3609,12 +3615,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
        else
                adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
 
-       /* primary mac needs 1 pmac entry */
-       adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-                                 sizeof(u32), GFP_KERNEL);
-       if (!adapter->pmac_id)
-               return -ENOMEM;
-
        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;
@@ -3800,6 +3800,23 @@ static bool be_reset_required(struct be_adapter *adapter)
        return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
 }
 
+static char *mc_name(struct be_adapter *adapter)
+{
+       if (adapter->function_mode & FLEX10_MODE)
+               return "FLEX10";
+       else if (adapter->function_mode & VNIC_MODE)
+               return "vNIC";
+       else if (adapter->function_mode & UMC_ENABLED)
+               return "UMC";
+       else
+               return "";
+}
+
+static inline char *func_name(struct be_adapter *adapter)
+{
+       return be_physfn(adapter) ? "PF" : "VF";
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -3844,6 +3861,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
                }
        }
 
+       status = pci_enable_pcie_error_reporting(pdev);
+       if (status)
+               dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+
        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;
@@ -3886,7 +3907,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
 
        status = be_setup(adapter);
        if (status)
-               goto msix_disable;
+               goto stats_clean;
 
        be_netdev_init(netdev);
        status = register_netdev(netdev);
@@ -3900,15 +3921,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
 
        be_cmd_query_port_name(adapter, &port_name);
 
-       dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
-                port_name);
+       dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
+                func_name(adapter), mc_name(adapter), port_name);
 
        return 0;
 
 unsetup:
        be_clear(adapter);
-msix_disable:
-       be_msix_disable(adapter);
 stats_clean:
        be_stats_cleanup(adapter);
 ctrl_clean:
@@ -4066,6 +4085,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;
 
+       pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
index 3574e1499dfc30db059160e80b8a77766cd8392f..feff51664dcf76974cc62a58be978bbbe83f0cf7 100644 (file)
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
        ---help---
          This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
+config FSL_XGMAC_MDIO
+       tristate "Freescale XGMAC MDIO"
+       depends on FSL_SOC
+       select PHYLIB
+       ---help---
+         This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
 config UCC_GETH
        tristate "Freescale QE Gigabit Ethernet"
        depends on QUICC_ENGINE
index 1752488c9ee5af1b3dadc54110932fa5b4e46ae8..3d1839afff6574ac96a9f4f2c7955739a8574567 100644 (file)
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 endif
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
index 9527b28d70d1976374b812d9a37fbfaaf1c79aaf..c93a05654b46125b6eeda59cf6e0bbfc83c5f9ef 100644 (file)
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/unistd.h>
 #include <linux/slab.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/crc32.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/ucc.h>
+#include <asm/ucc.h>   /* for ucc_set_qe_mux_mii_mng() */
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
+
+#define MIIMIND_BUSY           0x00000001
+#define MIIMIND_NOTVALID       0x00000004
+#define MIIMCFG_INIT_VALUE     0x00000007
+#define MIIMCFG_RESET          0x80000000
+
+#define MII_READ_COMMAND       0x00000001
+
+struct fsl_pq_mii {
+       u32 miimcfg;    /* MII management configuration reg */
+       u32 miimcom;    /* MII management command reg */
+       u32 miimadd;    /* MII management address reg */
+       u32 miimcon;    /* MII management control reg */
+       u32 miimstat;   /* MII management status reg */
+       u32 miimind;    /* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+       u8 res1[16];
+       u32 ieventm;    /* MDIO Interrupt event register (for etsec2)*/
+       u32 imaskm;     /* MDIO Interrupt mask register (for etsec2)*/
+       u8 res2[4];
+       u32 emapm;      /* MDIO Event mapping register (for etsec2)*/
+       u8 res3[1280];
+       struct fsl_pq_mii mii;
+       u8 res4[28];
+       u32 utbipar;    /* TBI phy address reg (only on UCC) */
+       u8 res5[2728];
+} __packed;
 
 /* Number of microseconds to wait for an MII register to respond */
 #define MII_TIMEOUT    1000
 
 struct fsl_pq_mdio_priv {
        void __iomem *map;
-       struct fsl_pq_mdio __iomem *regs;
+       struct fsl_pq_mii __iomem *regs;
+       int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data.  Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node.  Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+       unsigned int mii_offset;        /* offset of the MII registers */
+       uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+       void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
 };
 
 /*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus attached to the local interface, which may be different from the
- * generic mdio bus (tied to a single interface), waiting until the write is
- * done before returning. This is helpful in programming interfaces like
- * the TBI which control interfaces like onchip SERDES and are always tied to
- * the local mdio pins, which may not be the same as system mdio bus, used for
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
  * controlling the external PHYs, for example.
  */
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-               int regnum, u16 value)
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+               u16 value)
 {
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
 
        /* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
 }
 
 /*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.  All PHY operation
- * done on the bus attached to the local interface,
- * which may be different from the generic mdio bus
- * This is helpful in programming interfaces like
- * the TBI which, in turn, control interfaces like onchip SERDES
- * and are always tied to the local mdio pins, which may not be the
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operation done on the bus attached to the local interface, which
+ * may be different from the generic mdio bus.  This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  */
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
-               int mii_id, int regnum)
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       u16 value;
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
+       u16 value;
 
        /* Set the PHY address and the register address we want to read */
        out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
        /* Grab the value of the register from miimstat */
        value = in_be32(&regs->miimstat);
 
+       dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
        return value;
 }
 
-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
-       struct fsl_pq_mdio_priv *priv = bus->priv;
-
-       return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-       /* Write to the local MII regs */
-       return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-       /* Read the local MII regs */
-       return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
 
        mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
        mutex_unlock(&bus->mdio_lock);
 
        if (!status) {
-               printk(KERN_ERR "%s: The MII Bus is stuck!\n",
-                               bus->name);
+               dev_err(&bus->dev, "timeout waiting for MII bus\n");
                return -EBUSY;
        }
 
        return 0;
 }
 
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
 {
-       const u32 *addr;
-       u64 taddr = OF_BAD_ADDR;
-
-       addr = of_get_address(np, 0, NULL, NULL);
-       if (addr)
-               taddr = of_translate_address(np, addr);
+       struct gfar __iomem *enet_regs = p;
 
-       snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
-               (unsigned long long)taddr);
+       return &enet_regs->tbipa;
 }
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+       return p;
+}
+#endif
 
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-       struct gfar __iomem *enet_regs;
+       struct fsl_pq_mdio __iomem *mdio = p;
 
-       /*
-        * This is mildly evil, but so is our hardware for doing this.
-        * Also, we have to cast back to struct gfar because of
-        * definition weirdness done in gianfar.h.
-        */
-       if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-               of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-               of_device_is_compatible(np, "gianfar")) {
-               enet_regs = (struct gfar __iomem *)regs;
-               return &enet_regs->tbipa;
-       } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-                       of_device_is_compatible(np, "fsl,etsec2-tbi")) {
-               return of_iomap(np, 1);
-       }
-#endif
-       return NULL;
+       return &mdio->utbipar;
 }
 
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them.  Therefore, we need to scan all UCC nodes looking for
+ * the one that encompases the given MDIO node.  We do this by comparing
+ * physical addresses.  The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
 {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+       static bool found_mii_master;
        struct device_node *np = NULL;
-       int err = 0;
 
-       for_each_compatible_node(np, NULL, "ucc_geth") {
-               struct resource tempres;
+       if (found_mii_master)
+               return;
 
-               err = of_address_to_resource(np, 0, &tempres);
-               if (err)
+       for_each_compatible_node(np, NULL, "ucc_geth") {
+               struct resource res;
+               const uint32_t *iprop;
+               uint32_t id;
+               int ret;
+
+               ret = of_address_to_resource(np, 0, &res);
+               if (ret < 0) {
+                       pr_debug("fsl-pq-mdio: no address range in node %s\n",
+                                np->full_name);
                        continue;
+               }
 
                /* if our mdio regs fall within this UCC regs range */
-               if ((start >= tempres.start) && (end <= tempres.end)) {
-                       /* Find the id of the UCC */
-                       const u32 *id;
-
-                       id = of_get_property(np, "cell-index", NULL);
-                       if (!id) {
-                               id = of_get_property(np, "device-id", NULL);
-                               if (!id)
-                                       continue;
+               if ((start < res.start) || (end > res.end))
+                       continue;
+
+               iprop = of_get_property(np, "cell-index", NULL);
+               if (!iprop) {
+                       iprop = of_get_property(np, "device-id", NULL);
+                       if (!iprop) {
+                               pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+                                        np->full_name);
+                               continue;
                        }
+               }
 
-                       *ucc_id = *id;
+               id = be32_to_cpup(iprop);
 
-                       return 0;
+               /*
+                * cell-index and device-id for QE nodes are
+                * numbered from 1, not 0.
+                */
+               if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+                       pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+                                np->full_name);
+                       continue;
                }
+
+               pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+               found_mii_master = true;
        }
+}
 
-       if (err)
-               return err;
-       else
-               return -EINVAL;
-#else
-       return -ENODEV;
 #endif
-}
 
-static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+static struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+       {
+               .compatible = "fsl,gianfar-tbi",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,gianfar-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .type = "mdio",
+               .compatible = "gianfar",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,etsec2-tbi",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_etsec_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,etsec2-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_etsec_tbipa,
+               },
+       },
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+       {
+               .compatible = "fsl,ucc-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_ucc_tbipa,
+                       .ucc_configure = ucc_configure,
+               },
+       },
+       {
+               /* Legacy UCC MDIO node */
+               .type = "mdio",
+               .compatible = "ucc_geth_phy",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_ucc_tbipa,
+                       .ucc_configure = ucc_configure,
+               },
+       },
+#endif
+       /* No Kconfig option for Fman support yet */
+       {
+               .compatible = "fsl,fman-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       /* Fman TBI operations are handled elsewhere */
+               },
+       },
+
+       {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
-       struct device_node *np = ofdev->dev.of_node;
+       const struct of_device_id *id =
+               of_match_device(fsl_pq_mdio_match, &pdev->dev);
+       const struct fsl_pq_mdio_data *data = id->data;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource res;
        struct device_node *tbi;
        struct fsl_pq_mdio_priv *priv;
-       struct fsl_pq_mdio __iomem *regs = NULL;
-       void __iomem *map;
-       u32 __iomem *tbipa;
        struct mii_bus *new_bus;
-       int tbiaddr = -1;
-       const u32 *addrp;
-       u64 addr = 0, size = 0;
        int err;
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
-       new_bus = mdiobus_alloc();
-       if (!new_bus) {
-               err = -ENOMEM;
-               goto err_free_priv;
-       }
+       new_bus = mdiobus_alloc_size(sizeof(*priv));
+       if (!new_bus)
+               return -ENOMEM;
 
+       priv = new_bus->priv;
        new_bus->name = "Freescale PowerQUICC MII Bus",
-       new_bus->read = &fsl_pq_mdio_read,
-       new_bus->write = &fsl_pq_mdio_write,
-       new_bus->reset = &fsl_pq_mdio_reset,
-       new_bus->priv = priv;
-       fsl_pq_mdio_bus_name(new_bus->id, np);
-
-       addrp = of_get_address(np, 0, &size, NULL);
-       if (!addrp) {
-               err = -EINVAL;
-               goto err_free_bus;
+       new_bus->read = &fsl_pq_mdio_read;
+       new_bus->write = &fsl_pq_mdio_write;
+       new_bus->reset = &fsl_pq_mdio_reset;
+       new_bus->irq = priv->irqs;
+
+       err = of_address_to_resource(np, 0, &res);
+       if (err < 0) {
+               dev_err(&pdev->dev, "could not obtain address information\n");
+               goto error;
        }
 
-       /* Set the PHY base address */
-       addr = of_translate_address(np, addrp);
-       if (addr == OF_BAD_ADDR) {
-               err = -EINVAL;
-               goto err_free_bus;
-       }
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+               (unsigned long long)res.start);
 
-       map = ioremap(addr, size);
-       if (!map) {
+       priv->map = of_iomap(np, 0);
+       if (!priv->map) {
                err = -ENOMEM;
-               goto err_free_bus;
+               goto error;
        }
-       priv->map = map;
-
-       if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-                       of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-                       of_device_is_compatible(np, "fsl,ucc-mdio") ||
-                       of_device_is_compatible(np, "ucc_geth_phy"))
-               map -= offsetof(struct fsl_pq_mdio, miimcfg);
-       regs = map;
-       priv->regs = regs;
-
-       new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 
-       if (NULL == new_bus->irq) {
-               err = -ENOMEM;
-               goto err_unmap_regs;
+       /*
+        * Some device tree nodes represent only the MII registers, and
+        * others represent the MAC and MII registers.  The 'mii_offset' field
+        * contains the offset of the MII registers inside the mapped register
+        * space.
+        */
+       if (data->mii_offset > resource_size(&res)) {
+               dev_err(&pdev->dev, "invalid register map\n");
+               err = -EINVAL;
+               goto error;
        }
+       priv->regs = priv->map + data->mii_offset;
 
-       new_bus->parent = &ofdev->dev;
-       dev_set_drvdata(&ofdev->dev, new_bus);
-
-       if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-                       of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-                       of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-                       of_device_is_compatible(np, "fsl,etsec2-tbi") ||
-                       of_device_is_compatible(np, "gianfar")) {
-               tbipa = get_gfar_tbipa(regs, np);
-               if (!tbipa) {
-                       err = -EINVAL;
-                       goto err_free_irqs;
-               }
-       } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
-                       of_device_is_compatible(np, "ucc_geth_phy")) {
-               u32 id;
-               static u32 mii_mng_master;
-
-               tbipa = &regs->utbipar;
-
-               if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
-                       goto err_free_irqs;
+       new_bus->parent = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, new_bus);
 
-               if (!mii_mng_master) {
-                       mii_mng_master = id;
-                       ucc_set_qe_mux_mii_mng(id - 1);
+       if (data->get_tbipa) {
+               for_each_child_of_node(np, tbi) {
+                       if (strcmp(tbi->type, "tbi-phy") == 0) {
+                               dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+                                       strrchr(tbi->full_name, '/') + 1);
+                               break;
+                       }
                }
-       } else {
-               err = -ENODEV;
-               goto err_free_irqs;
-       }
 
-       for_each_child_of_node(np, tbi) {
-               if (!strncmp(tbi->type, "tbi-phy", 8))
-                       break;
-       }
+               if (tbi) {
+                       const u32 *prop = of_get_property(tbi, "reg", NULL);
+                       uint32_t __iomem *tbipa;
 
-       if (tbi) {
-               const u32 *prop = of_get_property(tbi, "reg", NULL);
+                       if (!prop) {
+                               dev_err(&pdev->dev,
+                                       "missing 'reg' property in node %s\n",
+                                       tbi->full_name);
+                               err = -EBUSY;
+                               goto error;
+                       }
 
-               if (prop)
-                       tbiaddr = *prop;
+                       tbipa = data->get_tbipa(priv->map);
 
-               if (tbiaddr == -1) {
-                       err = -EBUSY;
-                       goto err_free_irqs;
-               } else {
-                       out_be32(tbipa, tbiaddr);
+                       out_be32(tbipa, be32_to_cpup(prop));
                }
        }
 
+       if (data->ucc_configure)
+               data->ucc_configure(res.start, res.end);
+
        err = of_mdiobus_register(new_bus, np);
        if (err) {
-               printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
-                               new_bus->name);
-               goto err_free_irqs;
+               dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+                       new_bus->name);
+               goto error;
        }
 
        return 0;
 
-err_free_irqs:
-       kfree(new_bus->irq);
-err_unmap_regs:
-       iounmap(priv->map);
-err_free_bus:
+error:
+       if (priv->map)
+               iounmap(priv->map);
+
        kfree(new_bus);
-err_free_priv:
-       kfree(priv);
+
        return err;
 }
 
 
-static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
 {
-       struct device *device = &ofdev->dev;
+       struct device *device = &pdev->dev;
        struct mii_bus *bus = dev_get_drvdata(device);
        struct fsl_pq_mdio_priv *priv = bus->priv;
 
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
        dev_set_drvdata(device, NULL);
 
        iounmap(priv->map);
-       bus->priv = NULL;
        mdiobus_free(bus);
-       kfree(priv);
 
        return 0;
 }
 
-static struct of_device_id fsl_pq_mdio_match[] = {
-       {
-               .type = "mdio",
-               .compatible = "ucc_geth_phy",
-       },
-       {
-               .type = "mdio",
-               .compatible = "gianfar",
-       },
-       {
-               .compatible = "fsl,ucc-mdio",
-       },
-       {
-               .compatible = "fsl,gianfar-tbi",
-       },
-       {
-               .compatible = "fsl,gianfar-mdio",
-       },
-       {
-               .compatible = "fsl,etsec2-tbi",
-       },
-       {
-               .compatible = "fsl,etsec2-mdio",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-
 static struct platform_driver fsl_pq_mdio_driver = {
        .driver = {
                .name = "fsl-pq_mdio",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644 (file)
index bd17a2a..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller on Freescale PowerQUICC processors
- *
- * Author: Andy Fleming
- * Modifier: Sandeep Gopalpet
- *
- * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-#ifndef __FSL_PQ_MDIO_H
-#define __FSL_PQ_MDIO_H
-
-#define MIIMIND_BUSY            0x00000001
-#define MIIMIND_NOTVALID        0x00000004
-#define MIIMCFG_INIT_VALUE     0x00000007
-#define MIIMCFG_RESET           0x80000000
-
-#define MII_READ_COMMAND       0x00000001
-
-struct fsl_pq_mdio {
-       u8 res1[16];
-       u32 ieventm;    /* MDIO Interrupt event register (for etsec2)*/
-       u32 imaskm;     /* MDIO Interrupt mask register (for etsec2)*/
-       u8 res2[4];
-       u32 emapm;      /* MDIO Event mapping register (for etsec2)*/
-       u8 res3[1280];
-       u32 miimcfg;            /* MII management configuration reg */
-       u32 miimcom;            /* MII management command reg */
-       u32 miimadd;            /* MII management address reg */
-       u32 miimcon;            /* MII management control reg */
-       u32 miimstat;           /* MII management status reg */
-       u32 miimind;            /* MII management indication reg */
-       u8 reserved[28];        /* Space holder */
-       u32 utbipar;            /* TBI phy address reg (only on UCC) */
-       u8 res4[2728];
-} __packed;
-
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-                         int regnum, u16 value);
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
-int __init fsl_pq_mdio_init(void);
-void fsl_pq_mdio_exit(void);
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
-#endif /* FSL_PQ_MDIO_H */
index d3233f59a82e47b1d70a372c0973e1447d0ef90b..a1b52ec3b930981e240cad7ba4d3522936ed2eab 100644 (file)
 #include <linux/of_net.h>
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
 
 #define TX_TIMEOUT      (1*HZ)
 
@@ -395,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev)
        if (ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;
 
-       tctrl |= TCTRL_TXSCHED_PRIO;
+       if (priv->prio_sched_en)
+               tctrl |= TCTRL_TXSCHED_PRIO;
+       else {
+               tctrl |= TCTRL_TXSCHED_WRRS;
+               gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
+               gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+       }
 
        gfar_write(&regs->tctrl, tctrl);
 
@@ -1161,6 +1166,9 @@ static int gfar_probe(struct platform_device *ofdev)
        priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
+       /* use priority h/w tx queue scheduling for single queue devices */
+       if (priv->num_tx_queues == 1)
+               priv->prio_sched_en = 1;
 
        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(dev);
index 2136c7ff5e6d2331bf22195194b7f8b028f98587..4141ef2ddafc3974563b2e16e433d312bd1cb894 100644 (file)
@@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
 #define TCTRL_TFCPAUSE         0x00000008
 #define TCTRL_TXSCHED_MASK     0x00000006
 #define TCTRL_TXSCHED_INIT     0x00000000
+/* priority scheduling */
 #define TCTRL_TXSCHED_PRIO     0x00000002
+/* weighted round-robin scheduling (WRRS) */
 #define TCTRL_TXSCHED_WRRS     0x00000004
+/* default WRRS weight and policy setting,
+ * tailored to the tr03wt and tr47wt registers:
+ * equal weight for all Tx Qs, measured in 64byte units
+ */
+#define DEFAULT_WRRS_WEIGHT    0x18181818
+
 #define TCTRL_INIT_CSUM                (TCTRL_TUCSEN | TCTRL_IPCSEN)
 
 #define IEVENT_INIT_CLEAR      0xffffffff
@@ -1098,7 +1106,8 @@ struct gfar_private {
                extended_hash:1,
                bd_stash_en:1,
                rx_filer_enable:1,
-               wol_en:1; /* Wake-on-LAN enabled */
+               wol_en:1, /* Wake-on-LAN enabled */
+               prio_sched_en:1; /* Enable priority based Tx scheduling in Hw */
        unsigned short padding;
 
        /* PHY stuff */
index 0daa66b8eca088735974f7ee45595d22c6be4c56..b9db0e0405636780bf02bc598b12e79ea5f71074 100644 (file)
@@ -510,7 +510,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 
        spin_unlock_irqrestore(&etsects->lock, flags);
 
-       etsects->clock = ptp_clock_register(&etsects->caps);
+       etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
        if (IS_ERR(etsects->clock)) {
                err = PTR_ERR(etsects->clock);
                goto no_clock;
index 21c6574c5f15cdecd5dd5ce2ccfc02f0e60d423e..164288439220c69f59f755c091afb2c53289c420 100644 (file)
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
-#include "fsl_pq_mdio.h"
 
 #undef DEBUG
 
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644 (file)
index 0000000..1afb5ea
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ *          Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT        1000
+
+struct tgec_mdio_controller {
+       __be32  reserved[12];
+       __be32  mdio_stat;      /* MDIO configuration and status */
+       __be32  mdio_ctl;       /* MDIO control */
+       __be32  mdio_data;      /* MDIO data */
+       __be32  mdio_addr;      /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x)    (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY          (1 << 0)
+#define MDIO_STAT_RD_ER                (1 << 1)
+#define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS       (1 << 10)
+#define MDIO_CTL_SCAN_EN       (1 << 11)
+#define MDIO_CTL_POST_INC      (1 << 14)
+#define MDIO_CTL_READ          (1 << 15)
+
+#define MDIO_DATA(x)           (x & 0xffff)
+#define MDIO_DATA_BSY          (1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+                                struct tgec_mdio_controller __iomem *regs)
+{
+       uint32_t status;
+
+       /* Wait till the bus is free */
+       status = spin_event_timeout(
+               !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+       if (!status) {
+               dev_err(dev, "timeout waiting for bus to be free\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+                                struct tgec_mdio_controller __iomem *regs)
+{
+       uint32_t status;
+
+       /* Wait till the MDIO write is complete */
+       status = spin_event_timeout(
+               !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+       if (!status) {
+               dev_err(dev, "timeout waiting for operation to complete\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns.  All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       uint16_t dev_addr = regnum >> 16;
+       int ret;
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Set the port and dev addr */
+       out_be32(&regs->mdio_ctl,
+                MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+       /* Set the register address */
+       out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Write the value to the register */
+       out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+       ret = xgmac_wait_until_done(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first.  All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       uint16_t dev_addr = regnum >> 16;
+       uint32_t mdio_ctl;
+       uint16_t value;
+       int ret;
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Set the Port and Device Addrs */
+       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+       out_be32(&regs->mdio_ctl, mdio_ctl);
+
+       /* Set the register address */
+       out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Initiate the read */
+       out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+       ret = xgmac_wait_until_done(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Return all Fs if nothing was there */
+       if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+               dev_err(&bus->dev, "MDIO read error\n");
+               return 0xffff;
+       }
+
+       value = in_be32(&regs->mdio_data) & 0xffff;
+       dev_dbg(&bus->dev, "read %04x\n", value);
+
+       return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       int ret;
+
+       mutex_lock(&bus->mdio_lock);
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct mii_bus *bus;
+       struct resource res;
+       int ret;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&pdev->dev, "could not obtain address\n");
+               return ret;
+       }
+
+       bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+       if (!bus)
+               return -ENOMEM;
+
+       bus->name = "Freescale XGMAC MDIO Bus";
+       bus->read = xgmac_mdio_read;
+       bus->write = xgmac_mdio_write;
+       bus->reset = xgmac_mdio_reset;
+       bus->irq = bus->priv;
+       bus->parent = &pdev->dev;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+       /* Set the PHY base address */
+       bus->priv = of_iomap(np, 0);
+       if (!bus->priv) {
+               ret = -ENOMEM;
+               goto err_ioremap;
+       }
+
+       ret = of_mdiobus_register(bus, np);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot register MDIO bus\n");
+               goto err_registration;
+       }
+
+       dev_set_drvdata(&pdev->dev, bus);
+
+       return 0;
+
+err_registration:
+       iounmap(bus->priv);
+
+err_ioremap:
+       mdiobus_free(bus);
+
+       return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+       mdiobus_unregister(bus);
+       iounmap(bus->priv);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+       {
+               .compatible = "fsl,fman-xmdio",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+       .driver = {
+               .name = "fsl-fman_xmdio",
+               .of_match_table = xgmac_mdio_match,
+       },
+       .probe = xgmac_mdio_probe,
+       .remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
index fed5080a6b621a07da4837b45b095e1107119d4b..959faf7388e21ba77316d1817f66e337d6f89315 100644 (file)
@@ -150,7 +150,7 @@ config SUN3_82586
 
 config ZNET
        tristate "Zenith Z-Note support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && ISA_DMA_API
+       depends on EXPERIMENTAL && ISA_DMA_API && X86
        ---help---
          The Zenith Z-Note notebook computer has a built-in network
          (Ethernet) card, and this is the Linux driver for it. Note that the
index ba4e0cea3506f80da5cc36a69f22994a7a3e470e..c9479e081b8aa0507a980d6bb1bc11af1d1adbc0 100644 (file)
@@ -865,14 +865,14 @@ static void hardware_init(struct net_device *dev)
        disable_dma(znet->rx_dma);              /* reset by an interrupting task. */
        clear_dma_ff(znet->rx_dma);
        set_dma_mode(znet->rx_dma, DMA_RX_MODE);
-       set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start);
+       set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
        set_dma_count(znet->rx_dma, RX_BUF_SIZE);
        enable_dma(znet->rx_dma);
        /* Now set up the Tx channel. */
        disable_dma(znet->tx_dma);
        clear_dma_ff(znet->tx_dma);
        set_dma_mode(znet->tx_dma, DMA_TX_MODE);
-       set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start);
+       set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
        set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
        enable_dma(znet->tx_dma);
        release_dma_lock(flags);
index 736a7d987db599fb1f855cc74d89dcdc0d3ce640..9089d00f14216431b9bf33db13a51b35414184b9 100644 (file)
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
 
        ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
                         hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       /* MDI-X => 1; MDI => 0 */
+       if ((hw->media_type == e1000_media_type_copper) &&
+           netif_carrier_ok(netdev))
+               ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+                                                       ETH_TP_MDI_X :
+                                                       ETH_TP_MDI);
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+       if (hw->mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->mdix;
        return 0;
 }
 
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
 
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
                ecmd->advertising = hw->autoneg_advertised;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->flags);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->mdix = AUTO_ALL_MODES;
+               else
+                       hw->mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
 
        if (netif_running(adapter->netdev)) {
index f3f9aeb7d1e189f045b294a6716e27ce473133a0..222bfaff4622959df30eb7b89f25a2f7764dcc32 100644 (file)
@@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
        }
 
+       netdev_reset_queue(adapter->netdev);
        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
@@ -3273,6 +3274,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                             nr_frags, mss);
 
        if (count) {
+               netdev_sent_queue(netdev, skb->len);
                skb_tx_timestamp(skb);
 
                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
@@ -3860,6 +3862,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes=0, total_tx_packets=0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3877,6 +3880,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        if (cleaned) {
                                total_tx_packets += buffer_info->segs;
                                total_tx_bytes += buffer_info->bytecount;
+                               if (buffer_info->skb) {
+                                       bytes_compl += buffer_info->skb->len;
+                                       pkts_compl++;
+                               }
+
                        }
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->upper.data = 0;
@@ -3890,6 +3898,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
        tx_ring->next_to_clean = i;
 
+       netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
 #define TX_WAKE_THRESHOLD 32
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
@@ -4950,6 +4960,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       hw->mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
index 080c89093feb5df6393c6d8f58421ff254f2b05d..c98586408005a0dbbb557e4ae22fc3f49ba04a48 100644 (file)
@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
  **/
 static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-       u16 data = er32(POEMB);
+       u32 data = er32(POEMB);
 
        if (active)
                data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-       u16 data = er32(POEMB);
+       u32 data = er32(POEMB);
 
        if (!active) {
                data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
index 0349e2478df8f294b04c4c54fc036cc627312afe..c11ac2756667bbf7205be4b5d1c903b00e7c3e4b 100644 (file)
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
        else
                ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
        return 0;
 }
 
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
                return -EINVAL;
        }
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /*
+                * fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
 
        if (netif_running(adapter->netdev)) {
                e1000e_down(adapter);
                e1000e_up(adapter);
-       } else {
+       } else
                e1000e_reset(adapter);
-       }
 
        clear_bit(__E1000_RESETTING, &adapter->state);
        return 0;
@@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
                return -EINVAL;
 
        if (ec->rx_coalesce_usecs == 4) {
-               adapter->itr = adapter->itr_setting = 4;
+               adapter->itr_setting = 4;
+               adapter->itr = adapter->itr_setting;
        } else if (ec->rx_coalesce_usecs <= 3) {
                adapter->itr = 20000;
                adapter->itr_setting = ec->rx_coalesce_usecs;
index 3f0223ac4c7c2152ca8b125ec582303f3546d980..fb659dd8db038941842e9eb937b94eed8640d98e 100644 (file)
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
                        /*
                         * if short on Rx space, Rx wins and must trump Tx
-                        * adjustment or use Early Receive if available
+                        * adjustment
                         */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
@@ -3755,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
        e_dbg("icr is %08X\n", icr);
        if (icr & E1000_ICR_RXSEQ) {
                adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+               /*
+                * Force memory writes to complete before acknowledging the
+                * interrupt is handled.
+                */
                wmb();
        }
 
@@ -3796,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
                goto msi_test_failed;
        }
 
+       /*
+        * Force memory writes to complete before enabling and firing an
+        * interrupt.
+        */
        wmb();
 
        e1000_irq_enable(adapter);
@@ -3807,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 
        e1000_irq_disable(adapter);
 
-       rmb();
+       rmb();                  /* read flags after interrupt has been fired */
 
        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
                adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4670,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        struct e1000_buffer *buffer_info;
        unsigned int i;
        u32 cmd_length = 0;
-       u16 ipcse = 0, tucse, mss;
+       u16 ipcse = 0, mss;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
 
        if (!skb_is_gso(skb))
@@ -4704,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
        tucss = skb_transport_offset(skb);
        tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-       tucse = 0;
 
        cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
                       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4718,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
        context_desc->upper_setup.tcp_fields.tucss = tucss;
        context_desc->upper_setup.tcp_fields.tucso = tucso;
-       context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+       context_desc->upper_setup.tcp_fields.tucse = 0;
        context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
index b860d4f7ea2a950a7b24d0db8ca6f15446f1bfd3..fc62a3f3a5bec8b3e7ce0fba893ae50add89f22a 100644 (file)
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
 #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
 
 /* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82577 PHY Diagnostics Status */
 #define I82577_DSTATUS_CABLE_LENGTH       0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
+       /* Set MDI/MDIX mode */
+       ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               return ret_val;
+       phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+       if (ret_val)
+               return ret_val;
+
        return e1000_set_master_slave_mode(hw);
 }
 
index ba994fb4cec69bc60baaff7c9407faf9553d40be..ca4641e2f74870c1bac8c147ad231b19ba660e12 100644 (file)
@@ -2223,11 +2223,10 @@ out:
 s32 igb_set_eee_i350(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
-       u32 ipcnfg, eeer, ctrl_ext;
+       u32 ipcnfg, eeer;
 
-       ctrl_ext = rd32(E1000_CTRL_EXT);
-       if ((hw->mac.type != e1000_i350) ||
-           (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+       if ((hw->mac.type < e1000_i350) ||
+           (hw->phy.media_type != e1000_media_type_copper))
                goto out;
        ipcnfg = rd32(E1000_IPCNFG);
        eeer = rd32(E1000_EEER);
@@ -2240,6 +2239,14 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
                        E1000_EEER_RX_LPI_EN |
                        E1000_EEER_LPI_FC);
 
+               /* keep the LPI clock running before EEE is enabled */
+               if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+                       u32 eee_su;
+                       eee_su = rd32(E1000_EEE_SU);
+                       eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
+                       wr32(E1000_EEE_SU, eee_su);
+               }
+
        } else {
                ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
                        E1000_IPCNFG_EEE_100M_AN);
@@ -2249,6 +2256,8 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
        }
        wr32(E1000_IPCNFG, ipcnfg);
        wr32(E1000_EEER, eeer);
+       rd32(E1000_IPCNFG);
+       rd32(E1000_EEER);
 out:
 
        return ret_val;
index ec7e4fe3e3ee24d240880aeb6cee95eec215cd12..de4b41ec3c402da0829357edc5f77c1821369a5e 100644 (file)
 #define E1000_FCRTC_RTH_COAL_SHIFT      4
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
 
+/* Timestamp in Rx buffer */
+#define E1000_RXPBS_CFG_TS_EN           0x80000000
+
 /* SerDes Control */
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
 
 #define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
 #define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_TS            0x00080000 /* Time Sync Interrupt */
 #define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED  0x80000000
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
 #define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
+#define E1000_IMS_TS        E1000_ICR_TS        /* Time Sync Interrupt */
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
 
 #define E1000_TIMINCA_16NS_SHIFT 24
 
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+
 #define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
 #define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
 #define E1000_MDICNFG_PHY_MASK    0x03E00000
 #define E1000_IPCNFG_EEE_100M_AN     0x00000004  /* EEE Enable 100M AN */
 #define E1000_EEER_TX_LPI_EN         0x00010000  /* EEE Tx LPI Enable */
 #define E1000_EEER_RX_LPI_EN         0x00020000  /* EEE Rx LPI Enable */
-#define E1000_EEER_FRC_AN            0x10000000 /* Enable EEE in loopback */
+#define E1000_EEER_FRC_AN            0x10000000  /* Enable EEE in loopback */
 #define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
+#define E1000_EEE_SU_LPI_CLK_STP     0X00800000  /* EEE LPI Clock Stop */
 
 /* SerDes Control */
 #define E1000_GEN_CTL_READY             0x80000000
index 7be98b6f105235f85446972378cfd24badfdf5f0..3404bc79f4cadf76382c5dbfca62b4d4ebe1f693 100644 (file)
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
        phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
 
        ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Set MDI/MDIX mode */
+       ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               goto out;
+       phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
 
 out:
        return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
        if (ret_val)
                goto out;
 
-       phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
-       phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+       phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
 
        ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
        if (ret_val)
index 34e40619f16b908618b35b9708054d91b3a749f9..6ac3299bfcb9fefe23845b294a72e2119563ec36 100644 (file)
@@ -111,8 +111,9 @@ s32  igb_check_polarity_m88(struct e1000_hw *hw);
 #define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
 
 /* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82580_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82580_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82580 PHY Diagnostics Status */
 #define I82580_DSTATUS_CABLE_LENGTH       0x03FC
index 28394bea5253fc280e1aba973b20958ed1b9662f..e5db48594e8a929daab778dca1fe7c9b92f0e7fa 100644 (file)
@@ -91,6 +91,8 @@
 #define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
 #define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
 #define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
+#define E1000_TSICR      0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM       0x0B674 /* Interrupt Mask Register */
 
 /* Filtering Registers */
 #define E1000_SAQF(_n) (0x5980 + 4 * (_n))
 /* Energy Efficient Ethernet "EEE" register */
 #define E1000_IPCNFG  0x0E38  /* Internal PHY Configuration */
 #define E1000_EEER    0x0E30  /* Energy Efficient Ethernet */
+#define E1000_EEE_SU  0X0E34  /* EEE Setup */
 
 /* Thermal Sensor Register */
 #define E1000_THSTAT    0x08110 /* Thermal Sensor Status */
index 9e572dd29ab288e98bd72c4859f16fbf839c9c3b..8aad230c0592e03f5f3b753f636f0a38dd35d4a3 100644 (file)
 #include "e1000_mac.h"
 #include "e1000_82575.h"
 
+#ifdef CONFIG_IGB_PTP
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
+#endif /* CONFIG_IGB_PTP */
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -99,7 +101,6 @@ struct vf_data_storage {
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
-       struct pci_dev *vfdev;
 };
 
 #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
@@ -131,9 +132,9 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_512   512
+#define IGB_RXBUFFER_256   256
 #define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_512
+#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE      16
@@ -167,8 +168,8 @@ struct igb_tx_buffer {
        unsigned int bytecount;
        u16 gso_segs;
        __be16 protocol;
-       dma_addr_t dma;
-       u32 length;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
 };
 
@@ -212,7 +213,6 @@ struct igb_q_vector {
        struct igb_ring_container rx, tx;
 
        struct napi_struct napi;
-       int numa_node;
 
        u16 itr_val;
        u8 set_itr;
@@ -257,7 +257,6 @@ struct igb_ring {
        };
        /* Items past this point are only used during ring alloc / free */
        dma_addr_t dma;                /* phys address of the ring */
-       int numa_node;                  /* node to alloc ring memory on */
 };
 
 enum e1000_ring_flags_t {
@@ -342,7 +341,6 @@ struct igb_adapter {
 
        /* OS defined structs */
        struct pci_dev *pdev;
-       struct hwtstamp_config hwtstamp_config;
 
        spinlock_t stats64_lock;
        struct rtnl_link_stats64 stats64;
@@ -373,15 +371,19 @@ struct igb_adapter {
        int vf_rate_link_speed;
        u32 rss_queues;
        u32 wvbr;
-       int node;
        u32 *shadow_vfta;
 
+#ifdef CONFIG_IGB_PTP
        struct ptp_clock *ptp_clock;
-       struct ptp_clock_info caps;
-       struct delayed_work overflow_work;
+       struct ptp_clock_info ptp_caps;
+       struct delayed_work ptp_overflow_work;
+       struct work_struct ptp_tx_work;
+       struct sk_buff *ptp_tx_skb;
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
+#endif /* CONFIG_IGB_PTP */
+
        char fw_version[32];
 };
 
@@ -390,6 +392,7 @@ struct igb_adapter {
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
 #define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
 #define IGB_FLAG_DMAC              (1 << 4)
+#define IGB_FLAG_PTP               (1 << 5)
 
 /* DMA Coalescing defines */
 #define IGB_MIN_TXPBSIZE           20408
@@ -435,13 +438,17 @@ extern void igb_power_up_link(struct igb_adapter *);
 extern void igb_set_fw_version(struct igb_adapter *);
 #ifdef CONFIG_IGB_PTP
 extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_remove(struct igb_adapter *adapter);
-
-extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                                  struct skb_shared_hwtstamps *hwtstamps,
-                                  u64 systim);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+                               union e1000_adv_rx_desc *rx_desc,
+                               struct sk_buff *skb);
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+                                 struct ifreq *ifr, int cmd);
+#endif /* CONFIG_IGB_PTP */
 
-#endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
        if (hw->phy.ops.reset)
index 70591117051bf2faa446af5a3b81055f091d491d..2ea012849825224af910ba7189aa850b58610806 100644 (file)
@@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Full|
                                   SUPPORTED_Autoneg |
-                                  SUPPORTED_TP);
-               ecmd->advertising = (ADVERTISED_TP |
-                                    ADVERTISED_Pause);
+                                  SUPPORTED_TP |
+                                  SUPPORTED_Pause);
+               ecmd->advertising = ADVERTISED_TP;
 
                if (hw->mac.autoneg == 1) {
                        ecmd->advertising |= ADVERTISED_Autoneg;
@@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        ecmd->advertising |= hw->phy.autoneg_advertised;
                }
 
+               if (hw->mac.autoneg != 1)
+                       ecmd->advertising &= ~(ADVERTISED_Pause |
+                                              ADVERTISED_Asym_Pause);
+
+               if (hw->fc.requested_mode == e1000_fc_full)
+                       ecmd->advertising |= ADVERTISED_Pause;
+               else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+                       ecmd->advertising |= (ADVERTISED_Pause |
+                                             ADVERTISED_Asym_Pause);
+               else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+                       ecmd->advertising |=  ADVERTISED_Asym_Pause;
+               else
+                       ecmd->advertising &= ~(ADVERTISED_Pause |
+                                              ADVERTISED_Asym_Pause);
+
                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy.addr;
        } else {
@@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        }
 
        ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       /* MDI-X => 2; MDI =>1; Invalid =>0 */
+       if (hw->phy.media_type == e1000_media_type_copper)
+               ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+                                                     ETH_TP_MDI;
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
        return 0;
 }
 
@@ -214,6 +242,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                return -EINVAL;
        }
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
 
@@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__IGB_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /*
+                * fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
        if (netif_running(adapter->netdev)) {
                igb_down(adapter);
@@ -1469,33 +1526,22 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_reg = 0;
-       u16 phy_reg = 0;
 
        hw->mac.autoneg = false;
 
-       switch (hw->phy.type) {
-       case e1000_phy_m88:
-               /* Auto-MDI/MDIX Off */
-               igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
-               /* reset to update Auto-MDI/MDIX */
-               igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
-               /* autoneg off */
-               igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
-               break;
-       case e1000_phy_82580:
-               /* enable MII loopback */
-               igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
-               break;
-       case e1000_phy_i210:
-               /* set loopback speed in PHY */
-               igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
-                                       &phy_reg);
-               phy_reg |= GS40G_MAC_SPEED_1G;
-               igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
-                                       phy_reg);
-               ctrl_reg = rd32(E1000_CTRL_EXT);
-       default:
-               break;
+       if (hw->phy.type == e1000_phy_m88) {
+               if (hw->phy.id != I210_I_PHY_ID) {
+                       /* Auto-MDI/MDIX Off */
+                       igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+                       /* reset to update Auto-MDI/MDIX */
+                       igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+                       /* autoneg off */
+                       igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+               } else {
+                       /* force 1000, set loopback  */
+                       igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+                       igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+               }
        }
 
        /* add small delay to avoid loopback test failure */
@@ -1513,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
                     E1000_CTRL_FD |     /* Force Duplex to FULL */
                     E1000_CTRL_SLU);    /* Set link up enable bit */
 
-       if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+       if (hw->phy.type == e1000_phy_m88)
                ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 
        wr32(E1000_CTRL, ctrl_reg);
@@ -1521,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
        /* Disable the receiver on the PHY so when a cable is plugged in, the
         * PHY does not begin to autoneg when a cable is reconnected to the NIC.
         */
-       if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+       if (hw->phy.type == e1000_phy_m88)
                igb_phy_disable_receiver(adapter);
 
-       udelay(500);
-
+       mdelay(500);
        return 0;
 }
 
@@ -1785,13 +1830,6 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
                *data = 0;
                goto out;
        }
-       if ((adapter->hw.mac.type == e1000_i210)
-               || (adapter->hw.mac.type == e1000_i211)) {
-               dev_err(&adapter->pdev->dev,
-                       "Loopback test not supported on this part at this time.\n");
-               *data = 0;
-               goto out;
-       }
        *data = igb_setup_desc_rings(adapter);
        if (*data)
                goto out;
@@ -2257,6 +2295,54 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
        }
 }
 
+static int igb_get_ts_info(struct net_device *dev,
+                          struct ethtool_ts_info *info)
+{
+       struct igb_adapter *adapter = netdev_priv(dev);
+
+       switch (adapter->hw.mac.type) {
+#ifdef CONFIG_IGB_PTP
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
+       case e1000_i210:
+       case e1000_i211:
+               info->so_timestamping =
+                       SOF_TIMESTAMPING_TX_HARDWARE |
+                       SOF_TIMESTAMPING_RX_HARDWARE |
+                       SOF_TIMESTAMPING_RAW_HARDWARE;
+
+               if (adapter->ptp_clock)
+                       info->phc_index = ptp_clock_index(adapter->ptp_clock);
+               else
+                       info->phc_index = -1;
+
+               info->tx_types =
+                       (1 << HWTSTAMP_TX_OFF) |
+                       (1 << HWTSTAMP_TX_ON);
+
+               info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+               /* 82576 does not support timestamping all packets. */
+               if (adapter->hw.mac.type >= e1000_82580)
+                       info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+               else
+                       info->rx_filters |=
+                               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+               return 0;
+#endif /* CONFIG_IGB_PTP */
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static int igb_ethtool_begin(struct net_device *netdev)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2270,38 +2356,6 @@ static void igb_ethtool_complete(struct net_device *netdev)
        pm_runtime_put(&adapter->pdev->dev);
 }
 
-#ifdef CONFIG_IGB_PTP
-static int igb_ethtool_get_ts_info(struct net_device *dev,
-                                  struct ethtool_ts_info *info)
-{
-       struct igb_adapter *adapter = netdev_priv(dev);
-
-       info->so_timestamping =
-               SOF_TIMESTAMPING_TX_HARDWARE |
-               SOF_TIMESTAMPING_RX_HARDWARE |
-               SOF_TIMESTAMPING_RAW_HARDWARE;
-
-       if (adapter->ptp_clock)
-               info->phc_index = ptp_clock_index(adapter->ptp_clock);
-       else
-               info->phc_index = -1;
-
-       info->tx_types =
-               (1 << HWTSTAMP_TX_OFF) |
-               (1 << HWTSTAMP_TX_ON);
-
-       info->rx_filters =
-               (1 << HWTSTAMP_FILTER_NONE) |
-               (1 << HWTSTAMP_FILTER_ALL) |
-               (1 << HWTSTAMP_FILTER_SOME) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
-
-       return 0;
-}
-
-#endif
 static const struct ethtool_ops igb_ethtool_ops = {
        .get_settings           = igb_get_settings,
        .set_settings           = igb_set_settings,
@@ -2328,11 +2382,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
        .get_ethtool_stats      = igb_get_ethtool_stats,
        .get_coalesce           = igb_get_coalesce,
        .set_coalesce           = igb_set_coalesce,
+       .get_ts_info            = igb_get_ts_info,
        .begin                  = igb_ethtool_begin,
        .complete               = igb_ethtool_complete,
-#ifdef CONFIG_IGB_PTP
-       .get_ts_info            = igb_ethtool_get_ts_info,
-#endif
 };
 
 void igb_set_ethtool_ops(struct net_device *netdev)
index f88c822e57a6d3b3a9f54ac3659ecf1852f36b28..e1ceb37ef12e406fd7ff6dff13b08c18548fcea6 100644 (file)
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
 #endif
 
 #ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
                buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
                        n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                       (u64)buffer_info->dma,
-                       buffer_info->length,
+                       (u64)dma_unmap_addr(buffer_info, dma),
+                       dma_unmap_len(buffer_info, len),
                        buffer_info->next_to_watch,
                        (u64)buffer_info->time_stamp);
        }
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
                                " %04X  %p %016llX %p%s\n", i,
                                le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
-                               (u64)buffer_info->dma,
-                               buffer_info->length,
+                               (u64)dma_unmap_addr(buffer_info, dma),
+                               dma_unmap_len(buffer_info, len),
                                buffer_info->next_to_watch,
                                (u64)buffer_info->time_stamp,
                                buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
                                print_hex_dump(KERN_INFO, "",
                                        DUMP_PREFIX_ADDRESS,
                                        16, 1, buffer_info->skb->data,
-                                       buffer_info->length, true);
+                                       dma_unmap_len(buffer_info, len),
+                                       true);
                }
        }
 
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 {
        struct igb_ring *ring;
        int i;
-       int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                   adapter->node);
-               if (!ring)
-                       ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+               ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
-               ring->numa_node = adapter->node;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
                adapter->tx_ring[i] = ring;
        }
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                   adapter->node);
-               if (!ring)
-                       ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+               ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
-               ring->numa_node = adapter->node;
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 
                adapter->rx_ring[i] = ring;
        }
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
 
        igb_cache_ring_register(adapter);
 
        return 0;
 
 err:
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
        igb_free_queues(adapter);
 
        return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;
-       int orig_node = adapter->node;
 
        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-               if ((adapter->num_q_vectors == (adapter->num_rx_queues +
-                                               adapter->num_tx_queues)) &&
-                   (adapter->num_rx_queues == v_idx))
-                       adapter->node = orig_node;
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
-                                       adapter->node);
-               if (!q_vector)
-                       q_vector = kzalloc(sizeof(struct igb_q_vector),
-                                          GFP_KERNEL);
+               q_vector = kzalloc(sizeof(struct igb_q_vector),
+                                  GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
 
        return 0;
 
 err_out:
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
        igb_free_q_vectors(adapter);
        return -ENOMEM;
 }
@@ -1751,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter)
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
+#ifdef CONFIG_IGB_PTP
+       /* Re-enable PTP, where applicable. */
+       igb_ptp_reset(adapter);
+#endif /* CONFIG_IGB_PTP */
+
        igb_get_phy_info(hw);
 }
 
@@ -2180,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        }
 
 #endif
+
 #ifdef CONFIG_IGB_PTP
        /* do hw tstamp init after resetting */
        igb_ptp_init(adapter);
+#endif /* CONFIG_IGB_PTP */
 
-#endif
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2259,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
        pm_runtime_get_noresume(&pdev->dev);
 #ifdef CONFIG_IGB_PTP
-       igb_ptp_remove(adapter);
+       igb_ptp_stop(adapter);
+#endif /* CONFIG_IGB_PTP */
 
-#endif
        /*
         * The watchdog timer may be rescheduled, so explicitly
         * disable watchdog from being rescheduled.
@@ -2294,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
                /* disable iov and allow time for transactions to clear */
-               if (!igb_check_vf_assignment(adapter)) {
+               if (igb_vfs_are_assigned(adapter)) {
+                       dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+               } else {
                        pci_disable_sriov(pdev);
                        msleep(500);
-               } else {
-                       dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
                }
 
                kfree(adapter->vf_data);
@@ -2338,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
 #ifdef CONFIG_PCI_IOV
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
-       int old_vfs = igb_find_enabled_vfs(adapter);
+       int old_vfs = pci_num_vf(adapter->pdev);
        int i;
 
        /* Virtualization features not supported on i210 family. */
@@ -2418,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
                                  VLAN_HLEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-       adapter->node = -1;
-
        spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
        switch (hw->mac.type) {
@@ -2666,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
        struct device *dev = tx_ring->dev;
-       int orig_node = dev_to_node(dev);
        int size;
 
        size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-       tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
-       if (!tx_ring->tx_buffer_info)
-               tx_ring->tx_buffer_info = vzalloc(size);
+
+       tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
 
@@ -2680,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       set_dev_node(dev, tx_ring->numa_node);
        tx_ring->desc = dma_alloc_coherent(dev,
                                           tx_ring->size,
                                           &tx_ring->dma,
                                           GFP_KERNEL);
-       set_dev_node(dev, orig_node);
-       if (!tx_ring->desc)
-               tx_ring->desc = dma_alloc_coherent(dev,
-                                                  tx_ring->size,
-                                                  &tx_ring->dma,
-                                                  GFP_KERNEL);
-
        if (!tx_ring->desc)
                goto err;
 
@@ -2702,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
 err:
        vfree(tx_ring->tx_buffer_info);
-       dev_err(dev,
-               "Unable to allocate memory for the transmit descriptor ring\n");
+       tx_ring->tx_buffer_info = NULL;
+       dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -2820,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       int orig_node = dev_to_node(dev);
-       int size, desc_len;
+       int size;
 
        size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
-       if (!rx_ring->rx_buffer_info)
-               rx_ring->rx_buffer_info = vzalloc(size);
+
+       rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;
 
-       desc_len = sizeof(union e1000_adv_rx_desc);
 
        /* Round up to nearest 4K */
-       rx_ring->size = rx_ring->count * desc_len;
+       rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-       set_dev_node(dev, rx_ring->numa_node);
        rx_ring->desc = dma_alloc_coherent(dev,
                                           rx_ring->size,
                                           &rx_ring->dma,
                                           GFP_KERNEL);
-       set_dev_node(dev, orig_node);
-       if (!rx_ring->desc)
-               rx_ring->desc = dma_alloc_coherent(dev,
-                                                  rx_ring->size,
-                                                  &rx_ring->dma,
-                                                  GFP_KERNEL);
-
        if (!rx_ring->desc)
                goto err;
 
@@ -2859,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
-       dev_err(dev, "Unable to allocate memory for the receive descriptor"
-               " ring\n");
+       dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -2898,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 mrqc, rxcsum;
-       u32 j, num_rx_queues, shift = 0, shift2 = 0;
-       union e1000_reta {
-               u32 dword;
-               u8  bytes[4];
-       } reta;
-       static const u8 rsshash[40] = {
-               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
-               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
-               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
-               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+       u32 j, num_rx_queues, shift = 0;
+       static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+                                       0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+                                       0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+                                       0xFA01ACBE };
 
        /* Fill out hash function seeds */
-       for (j = 0; j < 10; j++) {
-               u32 rsskey = rsshash[(j * 4)];
-               rsskey |= rsshash[(j * 4) + 1] << 8;
-               rsskey |= rsshash[(j * 4) + 2] << 16;
-               rsskey |= rsshash[(j * 4) + 3] << 24;
-               array_wr32(E1000_RSSRK(0), j, rsskey);
-       }
+       for (j = 0; j < 10; j++)
+               wr32(E1000_RSSRK(j), rsskey[j]);
 
        num_rx_queues = adapter->rss_queues;
 
-       if (adapter->vfs_allocated_count) {
-               /* 82575 and 82576 supports 2 RSS queues for VMDq */
-               switch (hw->mac.type) {
-               case e1000_i350:
-               case e1000_82580:
-                       num_rx_queues = 1;
-                       shift = 0;
-                       break;
-               case e1000_82576:
+       switch (hw->mac.type) {
+       case e1000_82575:
+               shift = 6;
+               break;
+       case e1000_82576:
+               /* 82576 supports 2 RSS queues for SR-IOV */
+               if (adapter->vfs_allocated_count) {
                        shift = 3;
                        num_rx_queues = 2;
-                       break;
-               case e1000_82575:
-                       shift = 2;
-                       shift2 = 6;
-               default:
-                       break;
                }
-       } else {
-               if (hw->mac.type == e1000_82575)
-                       shift = 6;
+               break;
+       default:
+               break;
        }
 
-       for (j = 0; j < (32 * 4); j++) {
-               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
-               if (shift2)
-                       reta.bytes[j & 3] |= num_rx_queues << shift2;
-               if ((j & 3) == 3)
-                       wr32(E1000_RETA(j >> 2), reta.dword);
+       /*
+        * Populate the indirection table 4 entries at a time.  To do this
+        * we are generating the results for n and n+2 and then interleaving
+        * those with the results with n+1 and n+3.
+        */
+       for (j = 0; j < 32; j++) {
+               /* first pass generates n and n+2 */
+               u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+               u32 reta = (base & 0x07800780) >> (7 - shift);
+
+               /* second pass generates n+1 and n+3 */
+               base += 0x00010001 * num_rx_queues;
+               reta |= (base & 0x07800780) << (1 + shift);
+
+               wr32(E1000_RETA(j), reta);
        }
 
        /*
@@ -3184,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 #endif
        srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+#ifdef CONFIG_IGB_PTP
        if (hw->mac.type >= e1000_82580)
                srrctl |= E1000_SRRCTL_TIMESTAMP;
+#endif /* CONFIG_IGB_PTP */
        /* Only set Drop Enable if we are supporting multiple queues */
        if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
                srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3269,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
 {
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
-               if (tx_buffer->dma)
+               if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
-                                        tx_buffer->dma,
-                                        tx_buffer->length,
+                                        dma_unmap_addr(tx_buffer, dma),
+                                        dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
-       } else if (tx_buffer->dma) {
+       } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
-                              tx_buffer->dma,
-                              tx_buffer->length,
+                              dma_unmap_addr(tx_buffer, dma),
+                              dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
-       tx_buffer->dma = 0;
+       dma_unmap_len_set(tx_buffer, len, 0);
        /* buffer_info must be completely set up in the transmit path */
 }
 
@@ -4229,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
        if (tx_flags & IGB_TX_FLAGS_VLAN)
                cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
 
+#ifdef CONFIG_IGB_PTP
        /* set timestamp bit if present */
-       if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+       if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
                cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+#endif /* CONFIG_IGB_PTP */
 
        /* set segmentation bits for TSO */
        if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4275,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                       const u8 hdr_len)
 {
        struct sk_buff *skb = first->skb;
-       struct igb_tx_buffer *tx_buffer_info;
+       struct igb_tx_buffer *tx_buffer;
        union e1000_adv_tx_desc *tx_desc;
        dma_addr_t dma;
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4296,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                goto dma_error;
 
        /* record length, and DMA address */
-       first->length = size;
-       first->dma = dma;
+       dma_unmap_len_set(first, len, size);
+       dma_unmap_addr_set(first, dma, dma);
        tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
        for (;;) {
@@ -4339,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;
 
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               tx_buffer_info->length = size;
-               tx_buffer_info->dma = dma;
+               tx_buffer = &tx_ring->tx_buffer_info[i];
+               dma_unmap_len_set(tx_buffer, len, size);
+               dma_unmap_addr_set(tx_buffer, dma, dma);
 
                tx_desc->read.olinfo_status = 0;
                tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4392,9 +4324,9 @@ dma_error:
 
        /* clear dma mappings for failed tx_buffer_info map */
        for (;;) {
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-               if (tx_buffer_info == first)
+               tx_buffer = &tx_ring->tx_buffer_info[i];
+               igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+               if (tx_buffer == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
@@ -4440,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                                struct igb_ring *tx_ring)
 {
+#ifdef CONFIG_IGB_PTP
+       struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+#endif /* CONFIG_IGB_PTP */
        struct igb_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
@@ -4462,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+#ifdef CONFIG_IGB_PTP
+       if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                    !(adapter->ptp_tx_skb))) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+               adapter->ptp_tx_skb = skb_get(skb);
+               if (adapter->hw.mac.type == e1000_82576)
+                       schedule_work(&adapter->ptp_tx_work);
        }
+#endif /* CONFIG_IGB_PTP */
 
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4661,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter,
        bytes = 0;
        packets = 0;
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+               u32 rqdpc = rd32(E1000_RQDPC(i));
                struct igb_ring *ring = adapter->rx_ring[i];
 
-               ring->rx_stats.drops += rqdpc_tmp;
-               net_stats->rx_fifo_errors += rqdpc_tmp;
+               if (rqdpc) {
+                       ring->rx_stats.drops += rqdpc;
+                       net_stats->rx_fifo_errors += rqdpc;
+               }
 
                do {
                        start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
@@ -4755,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
        reg = rd32(E1000_CTRL_EXT);
        if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
                adapter->stats.rxerrc += rd32(E1000_RXERRC);
-               adapter->stats.tncrs += rd32(E1000_TNCRS);
+
+               /* this stat has invalid values on i210/i211 */
+               if ((hw->mac.type != e1000_i210) &&
+                   (hw->mac.type != e1000_i211))
+                       adapter->stats.tncrs += rd32(E1000_TNCRS);
        }
 
        adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -4852,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+#ifdef CONFIG_IGB_PTP
+       if (icr & E1000_ICR_TS) {
+               u32 tsicr = rd32(E1000_TSICR);
+
+               if (tsicr & E1000_TSICR_TXTS) {
+                       /* acknowledge the interrupt */
+                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
+                       /* retrieve hardware timestamp */
+                       schedule_work(&adapter->ptp_tx_work);
+               }
+       }
+#endif /* CONFIG_IGB_PTP */
+
        wr32(E1000_EIMS, adapter->eims_other);
 
        return IRQ_HANDLED;
@@ -5002,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
 {
        unsigned char mac_addr[ETH_ALEN];
-       struct pci_dev *pdev = adapter->pdev;
-       struct e1000_hw *hw = &adapter->hw;
-       struct pci_dev *pvfdev;
-       unsigned int device_id;
-       u16 thisvf_devfn;
 
        eth_random_addr(mac_addr);
        igb_set_vf_mac(adapter, vf, mac_addr);
 
-       switch (adapter->hw.mac.type) {
-       case e1000_82576:
-               device_id = IGB_82576_VF_DEV_ID;
-               /* VF Stride for 82576 is 2 */
-               thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
-                       (pdev->devfn & 1);
-               break;
-       case e1000_i350:
-               device_id = IGB_I350_VF_DEV_ID;
-               /* VF Stride for I350 is 4 */
-               thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
-                               (pdev->devfn & 3);
-               break;
-       default:
-               device_id = 0;
-               thisvf_devfn = 0;
-               break;
-       }
-
-       pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-       while (pvfdev) {
-               if (pvfdev->devfn == thisvf_devfn)
-                       break;
-               pvfdev = pci_get_device(hw->vendor_id,
-                                       device_id, pvfdev);
-       }
-
-       if (pvfdev)
-               adapter->vf_data[vf].vfdev = pvfdev;
-       else
-               dev_err(&pdev->dev,
-                       "Couldn't find pci dev ptr for VF %4.4x\n",
-                       thisvf_devfn);
-       return pvfdev != NULL;
+       return 0;
 }
 
-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       struct pci_dev *pvfdev;
-       u16 vf_devfn = 0;
-       u16 vf_stride;
-       unsigned int device_id;
-       int vfs_found = 0;
+       struct pci_dev *vfdev;
+       int dev_id;
 
        switch (adapter->hw.mac.type) {
        case e1000_82576:
-               device_id = IGB_82576_VF_DEV_ID;
-               /* VF Stride for 82576 is 2 */
-               vf_stride = 2;
+               dev_id = IGB_82576_VF_DEV_ID;
                break;
        case e1000_i350:
-               device_id = IGB_I350_VF_DEV_ID;
-               /* VF Stride for I350 is 4 */
-               vf_stride = 4;
+               dev_id = IGB_I350_VF_DEV_ID;
                break;
        default:
-               device_id = 0;
-               vf_stride = 0;
-               break;
-       }
-
-       vf_devfn = pdev->devfn + 0x80;
-       pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-       while (pvfdev) {
-               if (pvfdev->devfn == vf_devfn &&
-                   (pvfdev->bus->number >= pdev->bus->number))
-                       vfs_found++;
-               vf_devfn += vf_stride;
-               pvfdev = pci_get_device(hw->vendor_id,
-                                       device_id, pvfdev);
+               return false;
        }
 
-       return vfs_found;
-}
-
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
-       int i;
-       for (i = 0; i < adapter->vfs_allocated_count; i++) {
-               if (adapter->vf_data[i].vfdev) {
-                       if (adapter->vf_data[i].vfdev->dev_flags &
-                           PCI_DEV_FLAGS_ASSIGNED)
+       /* loop through all the VFs to see if we own any that are assigned */
+       vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+       while (vfdev) {
+               /* if we don't own it we don't care */
+               if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+                       /* if it is assigned we cannot release it */
+                       if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                return true;
                }
+
+               vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
        }
+
        return false;
 }
 
@@ -5643,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+#ifdef CONFIG_IGB_PTP
+       if (icr & E1000_ICR_TS) {
+               u32 tsicr = rd32(E1000_TSICR);
+
+               if (tsicr & E1000_TSICR_TXTS) {
+                       /* acknowledge the interrupt */
+                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
+                       /* retrieve hardware timestamp */
+                       schedule_work(&adapter->ptp_tx_work);
+               }
+       }
+#endif /* CONFIG_IGB_PTP */
+
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -5684,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+#ifdef CONFIG_IGB_PTP
+       if (icr & E1000_ICR_TS) {
+               u32 tsicr = rd32(E1000_TSICR);
+
+               if (tsicr & E1000_TSICR_TXTS) {
+                       /* acknowledge the interrupt */
+                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
+                       /* retrieve hardware timestamp */
+                       schedule_work(&adapter->ptp_tx_work);
+               }
+       }
+#endif /* CONFIG_IGB_PTP */
+
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -5743,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
-#ifdef CONFIG_IGB_PTP
-/**
- * igb_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_tx_buffer structure
- *
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we only
- * allow only one such packet into the queue.
- */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
-                           struct igb_tx_buffer *buffer_info)
-{
-       struct igb_adapter *adapter = q_vector->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       struct skb_shared_hwtstamps shhwtstamps;
-       u64 regval;
-
-       /* if skb does not support hw timestamp or TX stamp not valid exit */
-       if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
-           !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
-               return;
-
-       regval = rd32(E1000_TXSTMPL);
-       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-
-       igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
-       skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
-}
-
-#endif
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
@@ -5785,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        struct igb_adapter *adapter = q_vector->adapter;
        struct igb_ring *tx_ring = q_vector->tx.ring;
        struct igb_tx_buffer *tx_buffer;
-       union e1000_adv_tx_desc *tx_desc, *eop_desc;
+       union e1000_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = q_vector->tx.work_limit;
        unsigned int i = tx_ring->next_to_clean;
@@ -5797,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        tx_desc = IGB_TX_DESC(tx_ring, i);
        i -= tx_ring->count;
 
-       for (; budget; budget--) {
-               eop_desc = tx_buffer->next_to_watch;
-
-               /* prevent any other reads prior to eop_desc */
-               rmb();
+       do {
+               union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;
 
+               /* prevent any other reads prior to eop_desc */
+               rmb();
+
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
                        break;
@@ -5818,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
 
-#ifdef CONFIG_IGB_PTP
-               /* retrieve hardware timestamp */
-               igb_tx_hwtstamp(q_vector, tx_buffer);
-
-#endif
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
-               tx_buffer->skb = NULL;
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
-                                tx_buffer->dma,
-                                tx_buffer->length,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);
 
+               /* clear tx_buffer data */
+               tx_buffer->skb = NULL;
+               dma_unmap_len_set(tx_buffer, len, 0);
+
                /* clear last DMA location and unmap remaining buffers */
                while (tx_desc != eop_desc) {
-                       tx_buffer->dma = 0;
-
                        tx_buffer++;
                        tx_desc++;
                        i++;
@@ -5847,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                        }
 
                        /* unmap any remaining paged data */
-                       if (tx_buffer->dma) {
+                       if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
-                                              tx_buffer->dma,
-                                              tx_buffer->length,
+                                              dma_unmap_addr(tx_buffer, dma),
+                                              dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
+                               dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }
 
-               /* clear last DMA location */
-               tx_buffer->dma = 0;
-
                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
@@ -5867,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IGB_TX_DESC(tx_ring, 0);
                }
-       }
+
+               /* issue prefetch for next Tx descriptor */
+               prefetch(tx_desc);
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
 
        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);
@@ -5883,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
                struct e1000_hw *hw = &adapter->hw;
 
-               eop_desc = tx_buffer->next_to_watch;
-
                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
-               if (eop_desc &&
+               if (tx_buffer->next_to_watch &&
                    time_after(jiffies, tx_buffer->time_stamp +
                               (adapter->tx_timeout_factor * HZ)) &&
                    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5912,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
                                tx_buffer->time_stamp,
-                               eop_desc,
+                               tx_buffer->next_to_watch,
                                jiffies,
-                               eop_desc->wb.status);
+                               tx_buffer->next_to_watch->wb.status);
                        netif_stop_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
 
@@ -5994,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
                skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
-#ifdef CONFIG_IGB_PTP
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
-                           union e1000_adv_rx_desc *rx_desc,
-                           struct sk_buff *skb)
-{
-       struct igb_adapter *adapter = q_vector->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       u64 regval;
-
-       if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
-                                      E1000_RXDADV_STAT_TS))
-               return;
-
-       /*
-        * If this bit is set, then the RX registers contain the time stamp. No
-        * other packet will be time stamped until we read these registers, so
-        * read the registers to make them available again. Because only one
-        * packet can be time stamped at a time, we know that the register
-        * values must belong to this one here and therefore we don't need to
-        * compare any of the additional attributes stored for it.
-        *
-        * If nothing went wrong, then it should have a shared tx_flags that we
-        * can turn into a skb_shared_hwtstamps.
-        */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               u32 *stamp = (u32 *)skb->data;
-               regval = le32_to_cpu(*(stamp + 2));
-               regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
-               skb_pull(skb, IGB_TS_HDR_LEN);
-       } else {
-               if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
-                       return;
-
-               regval = rd32(E1000_RXSTMPL);
-               regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-       }
-
-       igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
-}
-
-#endif
 static void igb_rx_vlan(struct igb_ring *ring,
                        union e1000_adv_rx_desc *rx_desc,
                        struct sk_buff *skb)
@@ -6146,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
                }
 
 #ifdef CONFIG_IGB_PTP
-               igb_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif
+               igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif /* CONFIG_IGB_PTP */
                igb_rx_hash(rx_ring, rx_desc, skb);
                igb_rx_checksum(rx_ring, rx_desc, skb);
                igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6340,181 +6194,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        return 0;
 }
 
-/**
- * igb_hwtstamp_ioctl - control hardware time stamping
- * @netdev:
- * @ifreq:
- * @cmd:
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- **/
-static int igb_hwtstamp_ioctl(struct net_device *netdev,
-                             struct ifreq *ifr, int cmd)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
-       struct hwtstamp_config config;
-       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
-       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
-       u32 tsync_rx_cfg = 0;
-       bool is_l4 = false;
-       bool is_l2 = false;
-       u32 regval;
-
-       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-               return -EFAULT;
-
-       /* reserved for future extensions */
-       if (config.flags)
-               return -EINVAL;
-
-       switch (config.tx_type) {
-       case HWTSTAMP_TX_OFF:
-               tsync_tx_ctl = 0;
-       case HWTSTAMP_TX_ON:
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       switch (config.rx_filter) {
-       case HWTSTAMP_FILTER_NONE:
-               tsync_rx_ctl = 0;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-       case HWTSTAMP_FILTER_ALL:
-               /*
-                * register TSYNCRXCFG must be set, therefore it is not
-                * possible to time stamp both Sync and Delay_Req messages
-                * => fall back to time stamping all packets
-                */
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-               config.rx_filter = HWTSTAMP_FILTER_ALL;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
-               is_l4 = true;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
-               is_l4 = true;
-               break;
-       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
-               is_l2 = true;
-               is_l4 = true;
-               config.rx_filter = HWTSTAMP_FILTER_SOME;
-               break;
-       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
-               is_l2 = true;
-               is_l4 = true;
-               config.rx_filter = HWTSTAMP_FILTER_SOME;
-               break;
-       case HWTSTAMP_FILTER_PTP_V2_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
-               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-               is_l2 = true;
-               is_l4 = true;
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       if (hw->mac.type == e1000_82575) {
-               if (tsync_rx_ctl | tsync_tx_ctl)
-                       return -EINVAL;
-               return 0;
-       }
-
-       /*
-        * Per-packet timestamping only works if all packets are
-        * timestamped, so enable timestamping in all packets as
-        * long as one rx filter was configured.
-        */
-       if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
-               tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-       }
-
-       /* enable/disable TX */
-       regval = rd32(E1000_TSYNCTXCTL);
-       regval &= ~E1000_TSYNCTXCTL_ENABLED;
-       regval |= tsync_tx_ctl;
-       wr32(E1000_TSYNCTXCTL, regval);
-
-       /* enable/disable RX */
-       regval = rd32(E1000_TSYNCRXCTL);
-       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
-       regval |= tsync_rx_ctl;
-       wr32(E1000_TSYNCRXCTL, regval);
-
-       /* define which PTP packets are time stamped */
-       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
-
-       /* define ethertype filter for timestamped packets */
-       if (is_l2)
-               wr32(E1000_ETQF(3),
-                               (E1000_ETQF_FILTER_ENABLE | /* enable filter */
-                                E1000_ETQF_1588 | /* enable timestamping */
-                                ETH_P_1588));     /* 1588 eth protocol type */
-       else
-               wr32(E1000_ETQF(3), 0);
-
-#define PTP_PORT 319
-       /* L4 Queue Filter[3]: filter by destination port and protocol */
-       if (is_l4) {
-               u32 ftqf = (IPPROTO_UDP /* UDP */
-                       | E1000_FTQF_VF_BP /* VF not compared */
-                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
-                       | E1000_FTQF_MASK); /* mask all inputs */
-               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
-               wr32(E1000_IMIR(3), htons(PTP_PORT));
-               wr32(E1000_IMIREXT(3),
-                    (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
-               if (hw->mac.type == e1000_82576) {
-                       /* enable source port check */
-                       wr32(E1000_SPQF(3), htons(PTP_PORT));
-                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
-               }
-               wr32(E1000_FTQF(3), ftqf);
-       } else {
-               wr32(E1000_FTQF(3), E1000_FTQF_MASK);
-       }
-       wrfl();
-
-       adapter->hwtstamp_config = config;
-
-       /* clear TX/RX time stamp registers, just to be sure */
-       regval = rd32(E1000_TXSTMPH);
-       regval = rd32(E1000_RXSTMPH);
-
-       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-               -EFAULT : 0;
-}
-
 /**
  * igb_ioctl -
  * @netdev:
@@ -6528,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
+#ifdef CONFIG_IGB_PTP
        case SIOCSHWTSTAMP:
-               return igb_hwtstamp_ioctl(netdev, ifr, cmd);
+               return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+#endif /* CONFIG_IGB_PTP */
        default:
                return -EOPNOTSUPP;
        }
@@ -6667,6 +6348,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
index c846ea9131a3ab514a410165de0e15e8ff1697b4..ee21445157a3e8907a7b01267806d10b3ffab6e2 100644 (file)
  *   2^40 * 10^-9 /  60  = 18.3 minutes.
  */
 
-#define IGB_OVERFLOW_PERIOD    (HZ * 60 * 9)
-#define INCPERIOD_82576                (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK    ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
-#define IGB_NBITS_82580                40
+#define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 9)
+#define INCPERIOD_82576                        (1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK            ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576                 (16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580                        40
 
 /*
  * SYSTIM read access for the 82576
  */
 
-static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
+static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
 {
-       u64 val;
-       u32 lo, hi;
        struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
        struct e1000_hw *hw = &igb->hw;
+       u64 val;
+       u32 lo, hi;
 
        lo = rd32(E1000_SYSTIML);
        hi = rd32(E1000_SYSTIMH);
@@ -99,12 +99,12 @@ static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
  * SYSTIM read access for the 82580
  */
 
-static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
+static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
 {
-       u64 val;
-       u32 lo, hi, jk;
        struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
        struct e1000_hw *hw = &igb->hw;
+       u64 val;
+       u32 lo, hi, jk;
 
        /*
         * The timestamp latches on lowest register read. For the 82580
@@ -121,17 +121,102 @@ static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
        return val;
 }
 
+/*
+ * SYSTIM read access for I210/I211
+ */
+
+static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 sec, nsec, jk;
+
+       /*
+        * The timestamp latches on lowest register read. For I210/I211, the
+        * lowest register is SYSTIMR. Since we only need to provide nanosecond
+        * resolution, we can ignore it.
+        */
+       jk = rd32(E1000_SYSTIMR);
+       nsec = rd32(E1000_SYSTIML);
+       sec = rd32(E1000_SYSTIMH);
+
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+
+static void igb_ptp_write_i210(struct igb_adapter *adapter,
+                              const struct timespec *ts)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       /*
+        * Writing the SYSTIMR register is not necessary as it only provides
+        * sub-nanosecond resolution.
+        */
+       wr32(E1000_SYSTIML, ts->tv_nsec);
+       wr32(E1000_SYSTIMH, ts->tv_sec);
+}
+
+/**
+ * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, here have extended the system time with an overflow
+ * counter in software.
+ **/
+static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                      struct skb_shared_hwtstamps *hwtstamps,
+                                      u64 systim)
+{
+       unsigned long flags;
+       u64 ns;
+
+       switch (adapter->hw.mac.type) {
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
+               spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+               ns = timecounter_cyc2time(&adapter->tc, systim);
+
+               spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+               memset(hwtstamps, 0, sizeof(*hwtstamps));
+               hwtstamps->hwtstamp = ns_to_ktime(ns);
+               break;
+       case e1000_i210:
+       case e1000_i211:
+               memset(hwtstamps, 0, sizeof(*hwtstamps));
+               /* Upper 32 bits contain s, lower 32 bits contain ns. */
+               hwtstamps->hwtstamp = ktime_set(systim >> 32,
+                                               systim & 0xFFFFFFFF);
+               break;
+       default:
+               break;
+       }
+}
+
 /*
  * PTP clock operations
  */
 
-static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
 {
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       struct e1000_hw *hw = &igb->hw;
+       int neg_adj = 0;
        u64 rate;
        u32 incvalue;
-       int neg_adj = 0;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
-       struct e1000_hw *hw = &igb->hw;
 
        if (ppb < 0) {
                neg_adj = 1;
@@ -153,13 +238,14 @@ static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        return 0;
 }
 
-static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
 {
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       struct e1000_hw *hw = &igb->hw;
+       int neg_adj = 0;
        u64 rate;
        u32 inca;
-       int neg_adj = 0;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
-       struct e1000_hw *hw = &igb->hw;
 
        if (ppb < 0) {
                neg_adj = 1;
@@ -178,11 +264,12 @@ static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        return 0;
 }
 
-static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
+static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
 {
-       s64 now;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
        unsigned long flags;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+       s64 now;
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
@@ -195,12 +282,32 @@ static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
 {
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
+       struct timespec now, then = ns_to_timespec(delta);
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       igb_ptp_read_i210(igb, &now);
+       now = timespec_add(now, then);
+       igb_ptp_write_i210(igb, (const struct timespec *)&now);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
+}
+
+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+                                struct timespec *ts)
+{
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
        u64 ns;
        u32 remainder;
-       unsigned long flags;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
@@ -214,11 +321,29 @@ static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        return 0;
 }
 
-static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+                               struct timespec *ts)
 {
-       u64 ns;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
        unsigned long flags;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       igb_ptp_read_i210(igb, ts);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
+}
+
+static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
+                                const struct timespec *ts)
+{
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
+       u64 ns;
 
        ns = ts->tv_sec * 1000000000ULL;
        ns += ts->tv_nsec;
@@ -232,77 +357,369 @@ static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
        return 0;
 }
 
-static int ptp_82576_enable(struct ptp_clock_info *ptp,
-                           struct ptp_clock_request *rq, int on)
+static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
+                               const struct timespec *ts)
 {
-       return -EOPNOTSUPP;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       igb_ptp_write_i210(igb, ts);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
 }
 
-static int ptp_82580_enable(struct ptp_clock_info *ptp,
-                           struct ptp_clock_request *rq, int on)
+static int igb_ptp_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *rq, int on)
 {
        return -EOPNOTSUPP;
 }
 
-static void igb_overflow_check(struct work_struct *work)
+/**
+ * igb_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igb_ptp_tx_work(struct work_struct *work)
+{
+       struct igb_adapter *adapter = container_of(work, struct igb_adapter,
+                                                  ptp_tx_work);
+       struct e1000_hw *hw = &adapter->hw;
+       u32 tsynctxctl;
+
+       if (!adapter->ptp_tx_skb)
+               return;
+
+       tsynctxctl = rd32(E1000_TSYNCTXCTL);
+       if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
+               igb_ptp_tx_hwtstamp(adapter);
+       else
+               /* reschedule to check later */
+               schedule_work(&adapter->ptp_tx_work);
+}
+
+static void igb_ptp_overflow_check(struct work_struct *work)
 {
-       struct timespec ts;
        struct igb_adapter *igb =
-               container_of(work, struct igb_adapter, overflow_work.work);
+               container_of(work, struct igb_adapter, ptp_overflow_work.work);
+       struct timespec ts;
 
-       igb_gettime(&igb->caps, &ts);
+       igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
 
        pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
 
-       schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
+       schedule_delayed_work(&igb->ptp_overflow_work,
+                             IGB_SYSTIM_OVERFLOW_PERIOD);
+}
+
+/**
+ * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure.
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow only one such packet into the queue.
+ */
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct skb_shared_hwtstamps shhwtstamps;
+       u64 regval;
+
+       regval = rd32(E1000_TXSTMPL);
+       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+       igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+       skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+       dev_kfree_skb_any(adapter->ptp_tx_skb);
+       adapter->ptp_tx_skb = NULL;
+}
+
+void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+                        union e1000_adv_rx_desc *rx_desc,
+                        struct sk_buff *skb)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+       u64 regval;
+
+       if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
+                                      E1000_RXDADV_STAT_TS))
+               return;
+
+       /*
+        * If this bit is set, then the RX registers contain the time stamp. No
+        * other packet will be time stamped until we read these registers, so
+        * read the registers to make them available again. Because only one
+        * packet can be time stamped at a time, we know that the register
+        * values must belong to this one here and therefore we don't need to
+        * compare any of the additional attributes stored for it.
+        *
+        * If nothing went wrong, then it should have a shared tx_flags that we
+        * can turn into a skb_shared_hwtstamps.
+        */
+       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+               u32 *stamp = (u32 *)skb->data;
+               regval = le32_to_cpu(*(stamp + 2));
+               regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+               skb_pull(skb, IGB_TS_HDR_LEN);
+       } else {
+               if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+                       return;
+
+               regval = rd32(E1000_RXSTMPL);
+               regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+       }
+
+       igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @netdev:
+ * @ifreq:
+ * @cmd:
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't case any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+                          struct ifreq *ifr, int cmd)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       struct hwtstamp_config config;
+       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+       u32 tsync_rx_cfg = 0;
+       bool is_l4 = false;
+       bool is_l2 = false;
+       u32 regval;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               tsync_tx_ctl = 0;
+       case HWTSTAMP_TX_ON:
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               tsync_rx_ctl = 0;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_ALL:
+               /*
+                * register TSYNCRXCFG must be set, therefore it is not
+                * possible to time stamp both Sync and Delay_Req messages
+                * => fall back to time stamping all packets
+                */
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               is_l2 = true;
+               is_l4 = true;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (hw->mac.type == e1000_82575) {
+               if (tsync_rx_ctl | tsync_tx_ctl)
+                       return -EINVAL;
+               return 0;
+       }
+
+       /*
+        * Per-packet timestamping only works if all packets are
+        * timestamped, so enable timestamping in all packets as
+        * long as one rx filter was configured.
+        */
+       if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
+               tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+
+               if ((hw->mac.type == e1000_i210) ||
+                   (hw->mac.type == e1000_i211)) {
+                       regval = rd32(E1000_RXPBS);
+                       regval |= E1000_RXPBS_CFG_TS_EN;
+                       wr32(E1000_RXPBS, regval);
+               }
+       }
+
+       /* enable/disable TX */
+       regval = rd32(E1000_TSYNCTXCTL);
+       regval &= ~E1000_TSYNCTXCTL_ENABLED;
+       regval |= tsync_tx_ctl;
+       wr32(E1000_TSYNCTXCTL, regval);
+
+       /* enable/disable RX */
+       regval = rd32(E1000_TSYNCRXCTL);
+       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+       regval |= tsync_rx_ctl;
+       wr32(E1000_TSYNCRXCTL, regval);
+
+       /* define which PTP packets are time stamped */
+       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+       /* define ethertype filter for timestamped packets */
+       if (is_l2)
+               wr32(E1000_ETQF(3),
+                    (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+                     E1000_ETQF_1588 | /* enable timestamping */
+                     ETH_P_1588));     /* 1588 eth protocol type */
+       else
+               wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+       /* L4 Queue Filter[3]: filter by destination port and protocol */
+       if (is_l4) {
+               u32 ftqf = (IPPROTO_UDP /* UDP */
+                       | E1000_FTQF_VF_BP /* VF not compared */
+                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+                       | E1000_FTQF_MASK); /* mask all inputs */
+               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+               wr32(E1000_IMIR(3), htons(PTP_PORT));
+               wr32(E1000_IMIREXT(3),
+                    (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+               if (hw->mac.type == e1000_82576) {
+                       /* enable source port check */
+                       wr32(E1000_SPQF(3), htons(PTP_PORT));
+                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+               }
+               wr32(E1000_FTQF(3), ftqf);
+       } else {
+               wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+       }
+       wrfl();
+
+       /* clear TX/RX time stamp registers, just to be sure */
+       regval = rd32(E1000_TXSTMPL);
+       regval = rd32(E1000_TXSTMPH);
+       regval = rd32(E1000_RXSTMPL);
+       regval = rd32(E1000_RXSTMPH);
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
 }
 
 void igb_ptp_init(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
 
        switch (hw->mac.type) {
-       case e1000_i210:
-       case e1000_i211:
-       case e1000_i350:
+       case e1000_82576:
+               snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 1000000000;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+               adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+               adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+               adapter->ptp_caps.settime = igb_ptp_settime_82576;
+               adapter->ptp_caps.enable = igb_ptp_enable;
+               adapter->cc.read = igb_ptp_read_82576;
+               adapter->cc.mask = CLOCKSOURCE_MASK(64);
+               adapter->cc.mult = 1;
+               adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
+               /* Dial the nominal frequency. */
+               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+               break;
        case e1000_82580:
-               adapter->caps.owner     = THIS_MODULE;
-               strcpy(adapter->caps.name, "igb-82580");
-               adapter->caps.max_adj   = 62499999;
-               adapter->caps.n_ext_ts  = 0;
-               adapter->caps.pps       = 0;
-               adapter->caps.adjfreq   = ptp_82580_adjfreq;
-               adapter->caps.adjtime   = igb_adjtime;
-               adapter->caps.gettime   = igb_gettime;
-               adapter->caps.settime   = igb_settime;
-               adapter->caps.enable    = ptp_82580_enable;
-               adapter->cc.read        = igb_82580_systim_read;
-               adapter->cc.mask        = CLOCKSOURCE_MASK(IGB_NBITS_82580);
-               adapter->cc.mult        = 1;
-               adapter->cc.shift       = 0;
+       case e1000_i350:
+               snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 62499999;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+               adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+               adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+               adapter->ptp_caps.settime = igb_ptp_settime_82576;
+               adapter->ptp_caps.enable = igb_ptp_enable;
+               adapter->cc.read = igb_ptp_read_82580;
+               adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+               adapter->cc.mult = 1;
+               adapter->cc.shift = 0;
                /* Enable the timer functions by clearing bit 31. */
                wr32(E1000_TSAUXC, 0x0);
                break;
-
-       case e1000_82576:
-               adapter->caps.owner     = THIS_MODULE;
-               strcpy(adapter->caps.name, "igb-82576");
-               adapter->caps.max_adj   = 1000000000;
-               adapter->caps.n_ext_ts  = 0;
-               adapter->caps.pps       = 0;
-               adapter->caps.adjfreq   = ptp_82576_adjfreq;
-               adapter->caps.adjtime   = igb_adjtime;
-               adapter->caps.gettime   = igb_gettime;
-               adapter->caps.settime   = igb_settime;
-               adapter->caps.enable    = ptp_82576_enable;
-               adapter->cc.read        = igb_82576_systim_read;
-               adapter->cc.mask        = CLOCKSOURCE_MASK(64);
-               adapter->cc.mult        = 1;
-               adapter->cc.shift       = IGB_82576_TSYNC_SHIFT;
-               /* Dial the nominal frequency. */
-               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+       case e1000_i210:
+       case e1000_i211:
+               snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 62499999;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+               adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
+               adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
+               adapter->ptp_caps.settime = igb_ptp_settime_i210;
+               adapter->ptp_caps.enable = igb_ptp_enable;
+               /* Enable the timer functions by clearing bit 31. */
+               wr32(E1000_TSAUXC, 0x0);
                break;
-
        default:
                adapter->ptp_clock = NULL;
                return;
@@ -310,86 +727,114 @@ void igb_ptp_init(struct igb_adapter *adapter)
 
        wrfl();
 
-       timecounter_init(&adapter->tc, &adapter->cc,
-                        ktime_to_ns(ktime_get_real()));
+       spin_lock_init(&adapter->tmreg_lock);
+       INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+       /* Initialize the clock and overflow work for devices that need it. */
+       if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+               struct timespec ts = ktime_to_timespec(ktime_get_real());
 
-       INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
+               igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+       } else {
+               timecounter_init(&adapter->tc, &adapter->cc,
+                                ktime_to_ns(ktime_get_real()));
 
-       spin_lock_init(&adapter->tmreg_lock);
+               INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+                                 igb_ptp_overflow_check);
 
-       schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
+               schedule_delayed_work(&adapter->ptp_overflow_work,
+                                     IGB_SYSTIM_OVERFLOW_PERIOD);
+       }
+
+       /* Initialize the time sync interrupts for devices that support it. */
+       if (hw->mac.type >= e1000_82580) {
+               wr32(E1000_TSIM, E1000_TSIM_TXTS);
+               wr32(E1000_IMS, E1000_IMS_TS);
+       }
 
-       adapter->ptp_clock = ptp_clock_register(&adapter->caps);
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+                                               &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
                adapter->ptp_clock = NULL;
                dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
-       } else
+       } else {
                dev_info(&adapter->pdev->dev, "added PHC on %s\n",
                         adapter->netdev->name);
+               adapter->flags |= IGB_FLAG_PTP;
+       }
 }
 
-void igb_ptp_remove(struct igb_adapter *adapter)
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
 {
        switch (adapter->hw.mac.type) {
-       case e1000_i211:
-       case e1000_i210:
-       case e1000_i350:
-       case e1000_82580:
        case e1000_82576:
-               cancel_delayed_work_sync(&adapter->overflow_work);
+       case e1000_82580:
+       case e1000_i350:
+               cancel_delayed_work_sync(&adapter->ptp_overflow_work);
+               break;
+       case e1000_i210:
+       case e1000_i211:
+               /* No delayed work to cancel. */
                break;
        default:
                return;
        }
 
+       cancel_work_sync(&adapter->ptp_tx_work);
+
        if (adapter->ptp_clock) {
                ptp_clock_unregister(adapter->ptp_clock);
                dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
                         adapter->netdev->name);
+               adapter->flags &= ~IGB_FLAG_PTP;
        }
 }
 
 /**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @hwtstamps: timestamp structure to update
- * @systim: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions.
+ * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
  *
- * The 'tmreg_lock' spinlock is used to protect the consistency of the
- * system time value. This is needed because reading the 64 bit time
- * value involves reading two (or three) 32 bit registers. The first
- * read latches the value. Ditto for writing.
- *
- * In addition, here have extended the system time with an overflow
- * counter in software.
+ * This function handles the reset work required to re-enable the PTP device.
  **/
-void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                           struct skb_shared_hwtstamps *hwtstamps,
-                           u64 systim)
+void igb_ptp_reset(struct igb_adapter *adapter)
 {
-       u64 ns;
-       unsigned long flags;
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (!(adapter->flags & IGB_FLAG_PTP))
+               return;
 
        switch (adapter->hw.mac.type) {
+       case e1000_82576:
+               /* Dial the nominal frequency. */
+               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+               break;
+       case e1000_82580:
+       case e1000_i350:
        case e1000_i210:
        case e1000_i211:
-       case e1000_i350:
-       case e1000_82580:
-       case e1000_82576:
+               /* Enable the timer functions and interrupts. */
+               wr32(E1000_TSAUXC, 0x0);
+               wr32(E1000_TSIM, E1000_TSIM_TXTS);
+               wr32(E1000_IMS, E1000_IMS_TS);
                break;
        default:
+               /* No work to do. */
                return;
        }
 
-       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       /* Re-initialize the timer. */
+       if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+               struct timespec ts = ktime_to_timespec(ktime_get_real());
 
-       ns = timecounter_cyc2time(&adapter->tc, systim);
-
-       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
-
-       memset(hwtstamps, 0, sizeof(*hwtstamps));
-       hwtstamps->hwtstamp = ns_to_ktime(ns);
+               igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+       } else {
+               timecounter_init(&adapter->tc, &adapter->cc,
+                                ktime_to_ns(ktime_get_real()));
+       }
 }
index 5fd5d04c26c9a850543d966655b6b32956293a3b..89f40e51fc134f0537fe8cf6f9f9c3efca44f94e 100644 (file)
@@ -32,7 +32,7 @@
 
 obj-$(CONFIG_IXGBE) += ixgbe.o
 
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
               ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
 
index b9623e9ea895334e4c2ac6cf2b7b3ce86d28b821..5bd26763554c8926282739bf18d308d92c892990 100644 (file)
@@ -78,6 +78,9 @@
 
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
+#define IXGBE_RXBUFFER_2K    2048
+#define IXGBE_RXBUFFER_3K    3072
+#define IXGBE_RXBUFFER_4K    4096
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
 /*
 #define IXGBE_TX_FLAGS_FSO             (u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW            (u32)(1 << 7)
 #define IXGBE_TX_FLAGS_TSTAMP          (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_NO_IFCS         (u32)(1 << 9)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
  * this is twice the size of a half page we need to double the page order
  * for FCoE enabled Rx queues.
  */
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+                                           IXGBE_RXBUFFER_3K;
+#endif
+       return IXGBE_RXBUFFER_2K;
 }
-#else
-#define ixgbe_rx_pg_order(_ring) 0
+
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? 1 : 0;
 #endif
+       return 0;
+}
 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
 
 struct ixgbe_ring_container {
        struct ixgbe_ring *ring;        /* pointer to linked list of rings */
@@ -584,6 +597,9 @@ struct ixgbe_adapter {
 #ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff ixgbe_hwmon_buff;
 #endif /* CONFIG_IXGBE_HWMON */
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *ixgbe_dbg_adapter;
+#endif /*CONFIG_DEBUG_FS*/
 };
 
 struct ixgbe_fdir_filter {
@@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
                                  struct netdev_fcoe_hbainfo *info);
 extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
 #endif /* IXGBE_FCOE */
-
+#ifdef CONFIG_DEBUG_FS
+extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+extern void ixgbe_dbg_init(void);
+extern void ixgbe_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
 static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
 {
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
new file mode 100644 (file)
index 0000000..8d3a218
--- /dev/null
@@ -0,0 +1,300 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "ixgbe.h"
+
+static struct dentry *ixgbe_dbg_root;
+
+static char ixgbe_dbg_reg_ops_buf[256] = "";
+
+/**
+ * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
+ * @inode: inode that was opened
+ * @filp:  file info
+ *
+ * Stash the adapter pointer hiding in the inode into the file pointer where
+ * we can find it later in the read and write calls
+ **/
+static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/**
+ * ixgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+                                   size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       char buf[256];
+       int bytes_not_copied;
+       int len;
+
+       /* don't allow partial reads */
+       if (*ppos != 0)
+               return 0;
+
+       len = snprintf(buf, sizeof(buf), "%s: %s\n",
+                      adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
+       if (count < len)
+               return -ENOSPC;
+       bytes_not_copied = copy_to_user(buffer, buf, len);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+
+       *ppos = len;
+       return len;
+}
+
+/**
+ * ixgbe_dbg_reg_ops_write - write into reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
+                                    const char __user *buffer,
+                                    size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       int bytes_not_copied;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+       if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
+               return -ENOSPC;
+
+       bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+       else if (bytes_not_copied < count)
+               count -= bytes_not_copied;
+       else
+               return -ENOSPC;
+       ixgbe_dbg_reg_ops_buf[count] = '\0';
+
+       if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
+               u32 reg, value;
+               int cnt;
+               cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
+               if (cnt == 2) {
+                       IXGBE_WRITE_REG(&adapter->hw, reg, value);
+                       value = IXGBE_READ_REG(&adapter->hw, reg);
+                       e_dev_info("write: 0x%08x = 0x%08x\n", reg, value);
+               } else {
+                       e_dev_info("write <reg> <value>\n");
+               }
+       } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) {
+               u32 reg, value;
+               int cnt;
+               cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg);
+               if (cnt == 1) {
+                       value = IXGBE_READ_REG(&adapter->hw, reg);
+                       e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
+               } else {
+                       e_dev_info("read <reg>\n");
+               }
+       } else {
+               e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf);
+               e_dev_info("Available commands:\n");
+               e_dev_info("   read <reg>\n");
+               e_dev_info("   write <reg> <value>\n");
+       }
+       return count;
+}
+
+static const struct file_operations ixgbe_dbg_reg_ops_fops = {
+       .owner = THIS_MODULE,
+       .open =  ixgbe_dbg_reg_ops_open,
+       .read =  ixgbe_dbg_reg_ops_read,
+       .write = ixgbe_dbg_reg_ops_write,
+};
+
+static char ixgbe_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
+ * @inode: inode that was opened
+ * @filp: file info
+ *
+ * Stash the adapter pointer hiding in the inode into the file pointer
+ * where we can find it later in the read and write calls
+ **/
+static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/**
+ * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
+                                        char __user *buffer,
+                                        size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       char buf[256];
+       int bytes_not_copied;
+       int len;
+
+       /* don't allow partial reads */
+       if (*ppos != 0)
+               return 0;
+
+       len = snprintf(buf, sizeof(buf), "%s: %s\n",
+                      adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
+       if (count < len)
+               return -ENOSPC;
+       bytes_not_copied = copy_to_user(buffer, buf, len);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+
+       *ppos = len;
+       return len;
+}
+
+/**
+ * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
+                                         const char __user *buffer,
+                                         size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       int bytes_not_copied;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+       if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
+               return -ENOSPC;
+
+       bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
+                                         buffer, count);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+       else if (bytes_not_copied < count)
+               count -= bytes_not_copied;
+       else
+               return -ENOSPC;
+       ixgbe_dbg_netdev_ops_buf[count] = '\0';
+
+       if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+               adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+               e_dev_info("tx_timeout called\n");
+       } else {
+               e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
+               e_dev_info("Available commands:\n");
+               e_dev_info("    tx_timeout\n");
+       }
+       return count;
+}
+
+static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
+       .owner = THIS_MODULE,
+       .open = ixgbe_dbg_netdev_ops_open,
+       .read = ixgbe_dbg_netdev_ops_read,
+       .write = ixgbe_dbg_netdev_ops_write,
+};
+
+/**
+ * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter
+ * @adapter: the adapter that is starting up
+ **/
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
+{
+       const char *name = pci_name(adapter->pdev);
+       struct dentry *pfile;
+       adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
+       if (adapter->ixgbe_dbg_adapter) {
+               pfile = debugfs_create_file("reg_ops", 0600,
+                                           adapter->ixgbe_dbg_adapter, adapter,
+                                           &ixgbe_dbg_reg_ops_fops);
+               if (!pfile)
+                       e_dev_err("debugfs reg_ops for %s failed\n", name);
+               pfile = debugfs_create_file("netdev_ops", 0600,
+                                           adapter->ixgbe_dbg_adapter, adapter,
+                                           &ixgbe_dbg_netdev_ops_fops);
+               if (!pfile)
+                       e_dev_err("debugfs netdev_ops for %s failed\n", name);
+       } else {
+               e_dev_err("debugfs entry for %s failed\n", name);
+       }
+}
+
+/**
+ * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
+ * @adapter: the adapter that is stopping
+ **/
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
+{
+       if (adapter->ixgbe_dbg_adapter)
+               debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+       adapter->ixgbe_dbg_adapter = NULL;
+}
+
+/**
+ * ixgbe_dbg_init - start up debugfs for the driver
+ **/
+void ixgbe_dbg_init(void)
+{
+       ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
+       if (ixgbe_dbg_root == NULL)
+               pr_err("init of debugfs failed\n");
+}
+
+/**
+ * ixgbe_dbg_exit - clean out the driver's debugfs entries
+ **/
+void ixgbe_dbg_exit(void)
+{
+       debugfs_remove_recursive(ixgbe_dbg_root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
index ee61819d6088e2f35e4586b9dd664ef40e6da06f..868af693821957bfc4c9541e8c99c7117b1d2247 100644 (file)
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        }
 
        bi->dma = dma;
-       bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+       bi->page_offset = 0;
 
        return true;
 }
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                return max_len;
 }
 
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
-                             union ixgbe_adv_rx_desc *rx_desc,
-                             struct sk_buff *skb)
-{
-       __le32 rsc_enabled;
-       u32 rsc_cnt;
-
-       if (!ring_is_rsc_enabled(rx_ring))
-               return;
-
-       rsc_enabled = rx_desc->wb.lower.lo_dword.data &
-                     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
-       /* If this is an RSC frame rsc_cnt should be non-zero */
-       if (!rsc_enabled)
-               return;
-
-       rsc_cnt = le32_to_cpu(rsc_enabled);
-       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
-       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
                                   struct sk_buff *skb)
 {
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 
        prefetch(IXGBE_RX_DESC(rx_ring, ntc));
 
-       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
-               return false;
+       /* update RSC append count if present */
+       if (ring_is_rsc_enabled(rx_ring)) {
+               __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+                                    cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+               if (unlikely(rsc_enabled)) {
+                       u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+                       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+                       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
 
-       /* append_cnt indicates packet is RSC, if so fetch nextp */
-       if (IXGBE_CB(skb)->append_cnt) {
-               ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
-               ntc &= IXGBE_RXDADV_NEXTP_MASK;
-               ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+                       /* update ntc based on RSC value */
+                       ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+                       ntc &= IXGBE_RXDADV_NEXTP_MASK;
+                       ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+               }
        }
 
+       /* if we are the last buffer then there is nothing else to do */
+       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+               return false;
+
        /* place skb in next buffer to be received */
        rx_ring->rx_buffer_info[ntc].skb = skb;
        rx_ring->rx_stats.non_eop_descs++;
@@ -1457,6 +1446,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
        return true;
 }
 
+/**
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+                           struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
+
+       /*
+        * it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lomem pool per
+        * alloc_page(GFP_ATOMIC)
+        */
+       va = skb_frag_address(frag);
+
+       /*
+        * we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+}
+
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+                               struct sk_buff *skb)
+{
+       /* if the page was released unmap it, else just sync our portion */
+       if (unlikely(IXGBE_CB(skb)->page_released)) {
+               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               IXGBE_CB(skb)->page_released = false;
+       } else {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             IXGBE_CB(skb)->dma,
+                                             frag->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+       IXGBE_CB(skb)->dma = 0;
+}
+
 /**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                                  union ixgbe_adv_rx_desc *rx_desc,
                                  struct sk_buff *skb)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        struct net_device *netdev = rx_ring->netdev;
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* if the page was released unmap it, else just sync our portion */
-       if (unlikely(IXGBE_CB(skb)->page_released)) {
-               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               IXGBE_CB(skb)->page_released = false;
-       } else {
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             IXGBE_CB(skb)->dma,
-                                             frag->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
-                                             DMA_FROM_DEVICE);
-       }
-       IXGBE_CB(skb)->dma = 0;
 
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                return true;
        }
 
-       /*
-        * it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /*
-        * we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = skb_frag_size(frag);
-       if (pull_len > IXGBE_RX_HDR_SIZE)
-               pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-
-       /*
-        * if we sucked the frag empty then we should free it,
-        * if there are other frags here something is screwed up in hardware
-        */
-       if (skb_frag_size(frag) == 0) {
-               BUG_ON(skb_shinfo(skb)->nr_frags != 1);
-               skb_shinfo(skb)->nr_frags = 0;
-               __skb_frag_unref(frag);
-               skb->truesize -= ixgbe_rx_bufsz(rx_ring);
-       }
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               ixgbe_pull_tail(rx_ring, skb);
 
 #ifdef IXGBE_FCOE
        /* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1559,34 +1572,18 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
        return false;
 }
 
-/**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
-       struct page *page = rx_buffer->page;
-
-       /* if we are only owner of page and it is local we can reuse it */
-       return likely(page_count(page) == 1) &&
-              likely(page_to_nid(page) == numa_node_id());
-}
-
 /**
  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
  **/
 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *old_buff)
 {
        struct ixgbe_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;
-       u16 bufsz = ixgbe_rx_bufsz(rx_ring);
 
        new_buff = &rx_ring->rx_buffer_info[nta];
 
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
-
-       /* flip page offset to other buffer and store to new_buff */
-       new_buff->page_offset = old_buff->page_offset ^ bufsz;
+       new_buff->page_offset = old_buff->page_offset;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-                                        new_buff->page_offset, bufsz,
+                                        new_buff->page_offset,
+                                        ixgbe_rx_bufsz(rx_ring),
                                         DMA_FROM_DEVICE);
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(new_buff->page);
 }
 
 /**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
- * This function is based on skb_add_rx_frag.  I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
  **/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
-                             struct sk_buff *skb, int size)
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb)
 {
-       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                          rx_buffer->page, rx_buffer->page_offset,
-                          size);
-       skb->len += size;
-       skb->data_len += size;
-       skb->truesize += ixgbe_rx_bufsz(rx_ring);
+       struct page *page = rx_buffer->page;
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+                                  ixgbe_rx_bufsz(rx_ring);
+#endif
+
+       if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+               /* we can reuse buffer as-is, just make sure it is local */
+               if (likely(page_to_nid(page) == numa_node_id()))
+                       return true;
+
+               /* this page cannot be reused so discard it */
+               put_page(page);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(page_to_nid(page) != numa_node_id()))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+
+       /*
+        * since we are the only owner of the page and we need to
+        * increment it, just set the value to 2 in order to avoid
+        * an unnecessary locked operation
+        */
+       atomic_set(&page->_count, 2);
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(page);
+#endif
+
+       return true;
+}
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+                                            union ixgbe_adv_rx_desc *rx_desc)
+{
+       struct ixgbe_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+
+       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) +
+                                 rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                               IXGBE_RX_HDR_SIZE);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       return NULL;
+               }
+
+               /*
+                * we will be copying header into skb->data in
+                * pskb_may_pull so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+
+               /*
+                * Delay unmapping of the first packet. It carries the
+                * header information, HW may still access the header
+                * after the writeback.  Only unmap it when EOP is
+                * reached
+                */
+               if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+                       goto dma_sync;
+
+               IXGBE_CB(skb)->dma = rx_buffer->dma;
+       } else {
+               if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+                       ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+               /* we are reusing so sync this buffer for CPU use */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+
+       /* pull page into skb */
+       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+       } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+               /* the page has been released from the ring */
+               IXGBE_CB(skb)->page_released = true;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                              ixgbe_rx_pg_size(rx_ring),
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->skb = NULL;
+       rx_buffer->dma = 0;
+       rx_buffer->page = NULL;
+
+       return skb;
 }
 
 /**
@@ -1653,16 +1785,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 #ifdef IXGBE_FCOE
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       int ddp_bytes = 0;
+       int ddp_bytes;
+       unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
        do {
-               struct ixgbe_rx_buffer *rx_buffer;
                union ixgbe_adv_rx_desc *rx_desc;
                struct sk_buff *skb;
-               struct page *page;
-               u16 ntc;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1800,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        cleaned_count = 0;
                }
 
-               ntc = rx_ring->next_to_clean;
-               rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-               rx_buffer = &rx_ring->rx_buffer_info[ntc];
+               rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
                if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;
@@ -1684,75 +1812,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                rmb();
 
-               page = rx_buffer->page;
-               prefetchw(page);
-
-               skb = rx_buffer->skb;
-
-               if (likely(!skb)) {
-                       void *page_addr = page_address(page) +
-                                         rx_buffer->page_offset;
-
-                       /* prefetch first cache line of first page */
-                       prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-                       prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+               /* retrieve a buffer from the ring */
+               skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
 
-                       /* allocate a skb to store the frags */
-                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                       IXGBE_RX_HDR_SIZE);
-                       if (unlikely(!skb)) {
-                               rx_ring->rx_stats.alloc_rx_buff_failed++;
-                               break;
-                       }
-
-                       /*
-                        * we will be copying header into skb->data in
-                        * pskb_may_pull so it is in our interest to prefetch
-                        * it now to avoid a possible cache miss
-                        */
-                       prefetchw(skb->data);
-
-                       /*
-                        * Delay unmapping of the first packet. It carries the
-                        * header information, HW may still access the header
-                        * after the writeback.  Only unmap it when EOP is
-                        * reached
-                        */
-                       IXGBE_CB(skb)->dma = rx_buffer->dma;
-               } else {
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_buffer->dma,
-                                                     rx_buffer->page_offset,
-                                                     ixgbe_rx_bufsz(rx_ring),
-                                                     DMA_FROM_DEVICE);
-               }
-
-               /* pull page into skb */
-               ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
-                                 le16_to_cpu(rx_desc->wb.upper.length));
-
-               if (ixgbe_can_reuse_page(rx_buffer)) {
-                       /* hand second half of page back to the ring */
-                       ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-               } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-                       /* the page has been released from the ring */
-                       IXGBE_CB(skb)->page_released = true;
-               } else {
-                       /* we are not reusing the buffer so unmap it */
-                       dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                                      ixgbe_rx_pg_size(rx_ring),
-                                      DMA_FROM_DEVICE);
-               }
-
-               /* clear contents of buffer_info */
-               rx_buffer->skb = NULL;
-               rx_buffer->dma = 0;
-               rx_buffer->page = NULL;
-
-               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+               /* exit if we failed to retrieve a buffer */
+               if (!skb)
+                       break;
 
                cleaned_count++;
 
@@ -1775,6 +1840,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+                       /* include DDPed FCoE data */
+                       if (ddp_bytes > 0) {
+                               if (!mss) {
+                                       mss = rx_ring->netdev->mtu -
+                                               sizeof(struct fcoe_hdr) -
+                                               sizeof(struct fc_frame_header) -
+                                               sizeof(struct fcoe_crc_eof);
+                                       if (mss > 512)
+                                               mss &= ~511;
+                               }
+                               total_rx_bytes += ddp_bytes;
+                               total_rx_packets += DIV_ROUND_UP(ddp_bytes,
+                                                                mss);
+                       }
                        if (!ddp_bytes) {
                                dev_kfree_skb_any(skb);
                                continue;
@@ -1788,21 +1867,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                budget--;
        } while (likely(budget));
 
-#ifdef IXGBE_FCOE
-       /* include DDPed FCoE data */
-       if (ddp_bytes > 0) {
-               unsigned int mss;
-
-               mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
-                       sizeof(struct fc_frame_header) -
-                       sizeof(struct fcoe_crc_eof);
-               if (mss > 512)
-                       mss &= ~511;
-               total_rx_bytes += ddp_bytes;
-               total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
-       }
-
-#endif /* IXGBE_FCOE */
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
-       srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
        srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * total size of max desc * buf_len is not greater
         * than 65536
         */
-#if (PAGE_SIZE <= 8192)
        rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE <= 16384)
-       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#else
-       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#endif
        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
@@ -3606,8 +3660,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-       hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
-
 #ifdef IXGBE_FCOE
        if (adapter->netdev->features & NETIF_F_FCOE_MTU)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3807,6 +3859,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
        ixgbe_configure_dcb(adapter);
 #endif
+       /*
+        * We must restore virtualization before VLANs or else
+        * the VLVF registers will not be populated
+        */
+       ixgbe_configure_virtualization(adapter);
 
        ixgbe_set_rx_mode(adapter->netdev);
        ixgbe_restore_vlan(adapter);
@@ -3838,8 +3895,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                break;
        }
 
-       ixgbe_configure_virtualization(adapter);
-
 #ifdef IXGBE_FCOE
        /* configure FCoE L2 filters, redirection table, and Rx control */
        ixgbe_configure_fcoe(adapter);
@@ -4129,27 +4184,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 }
 
-/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set.  To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-       struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-       u16 i;
-
-       for (i = 0; i < rx_ring->count; i += 2) {
-               rx_buffer[0].page_offset = 0;
-               rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-               rx_buffer = &rx_buffer[2];
-       }
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -4195,8 +4229,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
-       ixgbe_init_rx_page_offset(rx_ring);
-
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -4646,8 +4678,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       ixgbe_init_rx_page_offset(rx_ring);
-
        return 0;
 err:
        vfree(rx_ring->rx_buffer_info);
@@ -5530,8 +5560,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 {
        u32 ssvpc;
 
-       /* Do not perform spoof check for 82598 */
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       /* Do not perform spoof check for 82598 or if not in IOV mode */
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
+           adapter->num_vfs == 0)
                return;
 
        ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
@@ -5543,7 +5574,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
        if (!ssvpc)
                return;
 
-       e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+       e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
 }
 
 /**
@@ -5874,9 +5905,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-                   !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
-                       return;
+               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
+                       if (unlikely(skb->no_fcs))
+                               first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
+                       if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+                               return;
+               }
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
@@ -5938,7 +5972,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
        /* set type for advanced descriptor with frame checksum insertion */
        __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
-                                     IXGBE_ADVTXD_DCMD_IFCS |
                                      IXGBE_ADVTXD_DCMD_DEXT);
 
        /* set HW vlan bit if vlan is present */
@@ -5958,6 +5991,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 #endif
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
+       /* insert frame checksum */
+       if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
+               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+
        return cmd_type;
 }
 
@@ -6063,8 +6100,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                if (likely(!data_len))
                        break;
 
-               if (unlikely(skb->no_fcs))
-                       cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
                tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
                i++;
@@ -6854,9 +6889,9 @@ static int ixgbe_set_features(struct net_device *netdev,
        return 0;
 }
 
-static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
-                            unsigned char *addr,
+                            const unsigned char *addr,
                             u16 flags)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -6893,7 +6928,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
 
 static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
                             struct net_device *dev,
-                            unsigned char *addr)
+                            const unsigned char *addr)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int err = -EOPNOTSUPP;
@@ -7136,11 +7171,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_ioremap;
        }
 
-       for (i = 1; i <= 5; i++) {
-               if (pci_resource_len(pdev, i) == 0)
-                       continue;
-       }
-
        netdev->netdev_ops = &ixgbe_netdev_ops;
        ixgbe_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
@@ -7419,6 +7449,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                e_err(probe, "failed to allocate sysfs resources\n");
 #endif /* CONFIG_IXGBE_HWMON */
 
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_adapter_init(adapter);
+#endif /* CONFIG_DEBUG_FS */
+
        return 0;
 
 err_register:
@@ -7453,6 +7487,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
 
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_adapter_exit(adapter);
+#endif /* CONFIG_DEBUG_FS */
+
        set_bit(__IXGBE_DOWN, &adapter->state);
        cancel_work_sync(&adapter->service_task);
 
@@ -7708,6 +7746,10 @@ static int __init ixgbe_init_module(void)
        pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
        pr_info("%s\n", ixgbe_copyright);
 
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_init();
+#endif /* CONFIG_DEBUG_FS */
+
 #ifdef CONFIG_IXGBE_DCA
        dca_register_notify(&dca_notifier);
 #endif
@@ -7730,6 +7772,11 @@ static void __exit ixgbe_exit_module(void)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
+
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_exit();
+#endif /* CONFIG_DEBUG_FS */
+
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
index 3456d56171437cfbab401c39e3db6ad08c153782..39881cb17a4b5fe8958b8e0aead9188feaaf0370 100644 (file)
@@ -960,7 +960,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
        /* (Re)start the overflow check */
        adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
 
-       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+                                               &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
                adapter->ptp_clock = NULL;
                e_dev_err("ptp_clock_register failed\n");
index 4fea8716ab64a2952b9bb052c98ffa5ccbb2014a..dce48bf64d9616beacb3ea9b081b6d7aea0730c2 100644 (file)
@@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
 {
+       /* VLAN 0 is a special case, don't allow it to be removed */
+       if (!vid && !add)
+               return 0;
+
        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
@@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
                                  VLAN_PRIO_SHIFT)), vf);
                ixgbe_set_vmolr(hw, vf, false);
        } else {
+               ixgbe_set_vf_vlan(adapter, true, 0, vf);
                ixgbe_set_vmvir(adapter, 0, vf);
                ixgbe_set_vmolr(hw, vf, true);
        }
@@ -810,9 +815,9 @@ out:
        return err;
 }
 
-static int ixgbe_link_mbps(int internal_link_speed)
+static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
 {
-       switch (internal_link_speed) {
+       switch (adapter->link_speed) {
        case IXGBE_LINK_SPEED_100_FULL:
                return 100;
        case IXGBE_LINK_SPEED_1GB_FULL:
@@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed)
        }
 }
 
-static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
-                                   int link_speed)
+static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
 {
-       int rf_dec, rf_int;
-       u32 bcnrc_val;
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bcnrc_val = 0;
+       u16 queue, queues_per_pool;
+       u16 tx_rate = adapter->vfinfo[vf].tx_rate;
+
+       if (tx_rate) {
+               /* start with base link speed value */
+               bcnrc_val = adapter->vf_rate_link_speed;
 
-       if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
-               rf_int = link_speed / tx_rate;
-               rf_dec = (link_speed - (rf_int * tx_rate));
-               rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
-               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
-               bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
-                              IXGBE_RTTBCNRC_RF_INT_MASK);
-               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
-       } else {
-               bcnrc_val = 0;
+               bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+               bcnrc_val /= tx_rate;
+
+               /* clear everything but the rate factor */
+               bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+                            IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+               /* enable the rate scheduler */
+               bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
        /*
         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
@@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
                break;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       /* determine how many queues per pool based on VMDq mask */
+       queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+
+       /* write value for all Tx queues belonging to VF */
+       for (queue = 0; queue < queues_per_pool; queue++) {
+               unsigned int reg_idx = (vf * queues_per_pool) + queue;
+
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       }
 }
 
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
 {
-       int actual_link_speed, i;
-       bool reset_rate = false;
+       int i;
 
        /* VF Tx rate limit was not set */
-       if (adapter->vf_rate_link_speed == 0)
+       if (!adapter->vf_rate_link_speed)
                return;
 
-       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-       if (actual_link_speed != adapter->vf_rate_link_speed) {
-               reset_rate = true;
+       if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
-                        "Link speed has been changed. VF Transmit rate "
-                        "is disabled\n");
+                        "Link speed has been changed. VF Transmit rate is disabled\n");
        }
 
        for (i = 0; i < adapter->num_vfs; i++) {
-               if (reset_rate)
+               if (!adapter->vf_rate_link_speed)
                        adapter->vfinfo[i].tx_rate = 0;
 
-               ixgbe_set_vf_rate_limit(&adapter->hw, i,
-                                       adapter->vfinfo[i].tx_rate,
-                                       actual_link_speed);
+               ixgbe_set_vf_rate_limit(adapter, i);
        }
 }
 
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       int actual_link_speed;
+       int link_speed;
+
+       /* verify VF is active */
+       if (vf >= adapter->num_vfs)
+               return -EINVAL;
 
-       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-       if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
-           (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
-           ((tx_rate != 0) && (tx_rate <= 10)))
-           /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+       /* verify link is up */
+       if (!adapter->link_up)
                return -EINVAL;
 
-       adapter->vf_rate_link_speed = actual_link_speed;
-       adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
-       ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+       /* verify we are linked at 10Gbps */
+       link_speed = ixgbe_link_mbps(adapter);
+       if (link_speed != 10000)
+               return -EINVAL;
+
+       /* rate limit cannot be less than 10Mbs or greater than link speed */
+       if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+               return -EINVAL;
+
+       /* store values */
+       adapter->vf_rate_link_speed = link_speed;
+       adapter->vfinfo[vf].tx_rate = tx_rate;
+
+       /* update hardware configuration */
+       ixgbe_set_vf_rate_limit(adapter, vf);
 
        return 0;
 }
index 418af827b23080bba6596630c271a17c0f008ed1..da17ccf5c09db9b8647a724c55d9c98eae4fa94f 100644 (file)
@@ -272,5 +272,6 @@ struct ixgbe_adv_tx_context_desc {
 /* Error Codes */
 #define IXGBE_ERR_INVALID_MAC_ADDR              -1
 #define IXGBE_ERR_RESET_FAILED                  -2
+#define IXGBE_ERR_INVALID_ARGUMENT              -3
 
 #endif /* _IXGBEVF_DEFINES_H_ */
index 98cadb0c4dab68a46dc3e13f87a3bcc596e1fe69..383b4e1cd17532682c9dfc087d7d9a08a969b81a 100644 (file)
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
 
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_RXBUFFER_3K    3072
+#define IXGBEVF_RXBUFFER_7K    7168
+#define IXGBEVF_RXBUFFER_15K   15360
 #define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 
 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -259,6 +261,11 @@ enum ixbgevf_state_t {
        __IXGBEVF_DOWN
 };
 
+struct ixgbevf_cb {
+       struct sk_buff *prev;
+};
+#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
+
 enum ixgbevf_boards {
        board_82599_vf,
        board_X540_vf,
index 6647383c4ddc85f9639eb010e0f3c5f470f4c3b9..0ee9bd4819f444b392d087403da514db125af0bd 100644 (file)
@@ -263,6 +263,8 @@ cont_loop:
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
+       q_vector->tx.total_bytes += total_bytes;
+       q_vector->tx.total_packets += total_packets;
 
        return count < tx_ring->count;
 }
@@ -272,12 +274,10 @@ cont_loop:
  * @q_vector: structure containing interrupt and ring information
  * @skb: packet to send up
  * @status: hardware indication of status of receive
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
  * @rx_desc: rx descriptor
  **/
 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                                struct sk_buff *skb, u8 status,
-                               struct ixgbevf_ring *ring,
                                union ixgbe_adv_rx_desc *rx_desc)
 {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -433,11 +433,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        skb->next = next_buffer->skb;
-                       skb->next->prev = skb;
+                       IXGBE_CB(skb->next)->prev = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
 
+               /* we should not be chaining buffers; if we did, drop the skb */
+               if (IXGBE_CB(skb)->prev) {
+                       do {
+                               struct sk_buff *this = skb;
+                               skb = IXGBE_CB(skb)->prev;
+                               dev_kfree_skb(this);
+                       } while (skb);
+                       goto next_desc;
+               }
+
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
                        dev_kfree_skb_irq(skb);
@@ -461,7 +471,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                }
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-               ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+               ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -490,6 +500,8 @@ next_desc:
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
+       q_vector->rx.total_packets += total_rx_packets;
+       q_vector->rx.total_bytes += total_rx_bytes;
 
        return !!budget;
 }
@@ -716,40 +728,15 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
        }
 }
 
-static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 msg;
-       bool got_ack = false;
-
-       if (!hw->mbx.ops.check_for_ack(hw))
-               got_ack = true;
 
-       if (!hw->mbx.ops.check_for_msg(hw)) {
-               hw->mbx.ops.read(hw, &msg, 1);
+       hw->mac.get_link_status = 1;
 
-               if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
-                       mod_timer(&adapter->watchdog_timer,
-                                 round_jiffies(jiffies + 1));
-
-               if (msg & IXGBE_VT_MSGTYPE_NACK)
-                       pr_warn("Last Request of type %2.2x to PF Nacked\n",
-                               msg & 0xFF);
-               /*
-                * Restore the PFSTS bit in case someone is polling for a
-                * return message from the PF
-                */
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-       }
-
-       /*
-        * checking for the ack clears the PFACK bit.  Place
-        * it back in the v2p_mailbox cache so that anyone
-        * polling for an ack will not miss it
-        */
-       if (got_ack)
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+               mod_timer(&adapter->watchdog_timer, jiffies);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -899,10 +886,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
        }
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &ixgbevf_msix_mbx, 0, netdev->name, adapter);
+                         &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
-                      "request_irq for msix_mbx failed: %d\n", err);
+                      "request_irq for msix_other failed: %d\n", err);
                goto free_queue_irqs;
        }
 
@@ -1057,15 +1044,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-       if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-               srrctl |= IXGBEVF_RXBUFFER_2048 >>
-                       IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       else
-               srrctl |= rx_ring->rx_buf_len >>
-                       IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
+       int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       int i;
+       u16 rx_buf_len;
+
+       /* notify the PF of our intent to use this size of frame */
+       ixgbevf_rlpml_set_vf(hw, max_frame);
+
+       /* PF will allow an extra 4 bytes past for vlan tagged frames */
+       max_frame += VLAN_HLEN;
+
+       /*
+        * Make best use of allocation by using all but 1K of a
+        * power of 2 allocation that will be used for skb->head.
+        */
+       if ((hw->mac.type == ixgbe_mac_X540_vf) &&
+           (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
+               rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+       else if (max_frame <= IXGBEVF_RXBUFFER_3K)
+               rx_buf_len = IXGBEVF_RXBUFFER_3K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_7K)
+               rx_buf_len = IXGBEVF_RXBUFFER_7K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_15K)
+               rx_buf_len = IXGBEVF_RXBUFFER_15K;
+       else
+               rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
@@ -1076,18 +1094,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
-       int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen;
-       int rx_buf_len;
 
        /* PSRTYPE must be initialized in 82599 */
        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-       if (netdev->mtu <= ETH_DATA_LEN)
-               rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       else
-               rx_buf_len = ALIGN(max_frame, 1024);
+
+       /* set_rx_buffer_len must be called before ring initialization */
+       ixgbevf_set_rx_buffer_len(adapter);
 
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1117,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_VFRDH(j);
                adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
                ixgbevf_configure_srrctl(adapter, j);
        }
@@ -1113,36 +1126,47 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       if (!hw->mac.ops.set_vfta)
+               return -EOPNOTSUPP;
 
        spin_lock(&adapter->mbx_lock);
 
        /* add VID to filter table */
-       if (hw->mac.ops.set_vfta)
-               hw->mac.ops.set_vfta(hw, vid, 0, true);
+       err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
        spin_unlock(&adapter->mbx_lock);
 
+       /* translate error return types so error makes sense */
+       if (err == IXGBE_ERR_MBX)
+               return -EIO;
+
+       if (err == IXGBE_ERR_INVALID_ARGUMENT)
+               return -EACCES;
+
        set_bit(vid, adapter->active_vlans);
 
-       return 0;
+       return err;
 }
 
 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int err = -EOPNOTSUPP;
 
        spin_lock(&adapter->mbx_lock);
 
        /* remove VID from filter table */
        if (hw->mac.ops.set_vfta)
-               hw->mac.ops.set_vfta(hw, vid, 0, false);
+               err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
        spin_unlock(&adapter->mbx_lock);
 
        clear_bit(vid, adapter->active_vlans);
 
-       return 0;
+       return err;
 }
 
 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1308,6 +1332,25 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
 }
 
+static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int api[] = { ixgbe_mbox_api_10,
+                     ixgbe_mbox_api_unknown };
+       int err = 0, idx = 0;
+
+       spin_lock(&adapter->mbx_lock);
+
+       while (api[idx] != ixgbe_mbox_api_unknown) {
+               err = ixgbevf_negotiate_api_version(hw, api[idx]);
+               if (!err)
+                       break;
+               idx++;
+       }
+
+       spin_unlock(&adapter->mbx_lock);
+}
+
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -1315,7 +1358,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        int i, j = 0;
        int num_rx_rings = adapter->num_rx_queues;
        u32 txdctl, rxdctl;
-       u32 msg[2];
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1398,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
        }
 
-       msg[0] = IXGBE_VF_SET_LPE;
-       msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       hw->mbx.ops.write_posted(hw, msg, 2);
-
        spin_unlock(&adapter->mbx_lock);
 
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1371,6 +1409,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);
 
+       hw->mac.get_link_status = 1;
        mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
@@ -1378,6 +1417,8 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
+       ixgbevf_negotiate_api(adapter);
+
        ixgbevf_configure(adapter);
 
        ixgbevf_up_complete(adapter);
@@ -1419,7 +1460,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
                        rx_buffer_info->skb = NULL;
                        do {
                                struct sk_buff *this = skb;
-                               skb = skb->prev;
+                               skb = IXGBE_CB(skb)->prev;
                                dev_kfree_skb(this);
                        } while (skb);
                }
@@ -1547,8 +1588,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-
        WARN_ON(in_interrupt());
 
        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -1561,10 +1600,8 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
         * watchdog task will continue to schedule reset tasks until
         * the PF is up and running.
         */
-       if (!hw->mac.ops.reset_hw(hw)) {
-               ixgbevf_down(adapter);
-               ixgbevf_up(adapter);
-       }
+       ixgbevf_down(adapter);
+       ixgbevf_up(adapter);
 
        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
 }
@@ -1866,6 +1903,22 @@ err_set_interrupt:
        return err;
 }
 
+/**
+ * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+
+       ixgbevf_free_q_vectors(adapter);
+       ixgbevf_reset_interrupt_capability(adapter);
+}
+
 /**
  * ixgbevf_sw_init - Initialize general software structures
  * (struct ixgbevf_adapter)
@@ -2351,6 +2404,8 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
+       ixgbevf_negotiate_api(adapter);
+
        /* allocate transmit descriptors */
        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
@@ -2860,10 +2915,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
-       u32 msg[2];
 
        if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2930,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
-       if (!netif_running(netdev)) {
-               msg[0] = IXGBE_VF_SET_LPE;
-               msg[1] = max_frame;
-               hw->mbx.ops.write_posted(hw, msg, 2);
-       }
-
        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);
 
        return 0;
 }
 
-static void ixgbevf_shutdown(struct pci_dev *pdev)
+static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+       int retval = 0;
+#endif
 
        netif_device_detach(netdev);
 
        if (netif_running(netdev)) {
+               rtnl_lock();
                ixgbevf_down(adapter);
                ixgbevf_free_irq(adapter);
                ixgbevf_free_all_tx_resources(adapter);
                ixgbevf_free_all_rx_resources(adapter);
+               rtnl_unlock();
        }
 
-       pci_save_state(pdev);
+       ixgbevf_clear_interrupt_scheme(adapter);
 
+#ifdef CONFIG_PM
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+#endif
        pci_disable_device(pdev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbevf_resume(struct pci_dev *pdev)
+{
+       struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       u32 err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       /*
+        * pci_restore_state clears dev->state_saved so call
+        * pci_save_state to restore it.
+        */
+       pci_save_state(pdev);
+
+       err = pci_enable_device_mem(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+               return err;
+       }
+       pci_set_master(pdev);
+
+       rtnl_lock();
+       err = ixgbevf_init_interrupt_scheme(adapter);
+       rtnl_unlock();
+       if (err) {
+               dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+               return err;
+       }
+
+       ixgbevf_reset(adapter);
+
+       if (netif_running(netdev)) {
+               err = ixgbevf_open(netdev);
+               if (err)
+                       return err;
+       }
+
+       netif_device_attach(netdev);
+
+       return err;
+}
+
+#endif /* CONFIG_PM */
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+       ixgbevf_suspend(pdev, PMSG_SUSPEND);
 }
 
 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3055,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        return stats;
 }
 
-static const struct net_device_ops ixgbe_netdev_ops = {
+static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
        .ndo_start_xmit         = ixgbevf_xmit_frame,
@@ -2962,7 +3071,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
 {
-       dev->netdev_ops = &ixgbe_netdev_ops;
+       dev->netdev_ops = &ixgbevf_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
 }
@@ -3131,6 +3240,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
+       ixgbevf_clear_interrupt_scheme(adapter);
 err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
        iounmap(hw->hw_addr);
@@ -3168,6 +3278,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
+       ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_reset_interrupt_capability(adapter);
 
        iounmap(adapter->hw.hw_addr);
@@ -3267,6 +3378,11 @@ static struct pci_driver ixgbevf_driver = {
        .id_table = ixgbevf_pci_tbl,
        .probe    = ixgbevf_probe,
        .remove   = __devexit_p(ixgbevf_remove),
+#ifdef CONFIG_PM
+       /* Power Management Hooks */
+       .suspend  = ixgbevf_suspend,
+       .resume   = ixgbevf_resume,
+#endif
        .shutdown = ixgbevf_shutdown,
        .err_handler = &ixgbevf_err_handler
 };
index 9c955900fe649deb1b7287443f2f2a5d9f21728a..d5028ddf4b318c5721d9f5b46ead7a76bb3b81b4 100644 (file)
@@ -86,14 +86,17 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
+       s32 ret_val = -IXGBE_ERR_MBX;
+
+       if (!mbx->ops.read)
+               goto out;
 
        ret_val = ixgbevf_poll_for_msg(hw);
 
        /* if ack received read message, otherwise we timed out */
        if (!ret_val)
                ret_val = mbx->ops.read(hw, msg, size);
-
+out:
        return ret_val;
 }
 
@@ -109,7 +112,11 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val;
+       s32 ret_val = -IXGBE_ERR_MBX;
+
+       /* exit if either we can't write or there isn't a defined timeout */
+       if (!mbx->ops.write || !mbx->timeout)
+               goto out;
 
        /* send msg */
        ret_val = mbx->ops.write(hw, msg, size);
@@ -117,7 +124,7 @@ static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
        /* if msg sent wait until we receive an ack */
        if (!ret_val)
                ret_val = ixgbevf_poll_for_ack(hw);
-
+out:
        return ret_val;
 }
 
index cf9131c5c1150aa6746e5201f280ec5a87c9bf91..946ce86f337f702701f10133faa8c71b099d81c3 100644 (file)
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+       ixgbe_mbox_api_10,      /* API version 1.0, linux/freebsd VF driver */
+       ixgbe_mbox_api_20,      /* API version 2.0, solaris Phase1 VF driver */
+       /* This value should always be last */
+       ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET            0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN   0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
 
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
index ec89b86f7ca4a66603c7128b3353d762928faf9c..0c7447e6fcc84a3a42044b11d0f85fbe3f2d0904 100644 (file)
@@ -79,6 +79,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);
 
+       /* reset the api version */
+       hw->api_version = ixgbe_mbox_api_10;
+
        IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
        IXGBE_WRITE_FLUSH(hw);
 
@@ -97,7 +100,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        msgbuf[0] = IXGBE_VF_RESET;
        mbx->ops.write_posted(hw, msgbuf, 1);
 
-       msleep(10);
+       mdelay(10);
 
        /* set our "perm_addr" based on info provided by PF */
        /* also set up the mc_filter_type which is piggy backed
@@ -346,16 +349,32 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                               bool vlan_on)
 {
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[2];
+       s32 err;
 
        msgbuf[0] = IXGBE_VF_SET_VLAN;
        msgbuf[1] = vlan;
        /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
        msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 
-       ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+       err = mbx->ops.write_posted(hw, msgbuf, 2);
+       if (err)
+               goto mbx_err;
 
-       return 0;
+       err = mbx->ops.read_posted(hw, msgbuf, 2);
+       if (err)
+               goto mbx_err;
+
+       /* remove extra bits from the message */
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+       msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
+
+       if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+               err = IXGBE_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+       return err;
 }
 
 /**
@@ -389,20 +408,23 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
                                     bool *link_up,
                                     bool autoneg_wait_to_complete)
 {
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       s32 ret_val = 0;
        u32 links_reg;
+       u32 in_msg = 0;
 
-       if (!(hw->mbx.ops.check_for_rst(hw))) {
-               *link_up = false;
-               *speed = 0;
-               return -1;
-       }
+       /* If we were hit with a reset drop the link */
+       if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+               mac->get_link_status = true;
 
-       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!mac->get_link_status)
+               goto out;
 
-       if (links_reg & IXGBE_LINKS_UP)
-               *link_up = true;
-       else
-               *link_up = false;
+       /* if link status is down no point in checking to see if pf is up */
+       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!(links_reg & IXGBE_LINKS_UP))
+               goto out;
 
        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
        case IXGBE_LINKS_SPEED_10G_82599:
@@ -416,7 +438,79 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
                break;
        }
 
-       return 0;
+       /* if the read failed it could just be a mailbox collision, best wait
+        * until we are called again and don't report an error */
+       if (mbx->ops.read(hw, &in_msg, 1))
+               goto out;
+
+       if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+               /* msg is not CTS and is NACK we must have lost CTS status */
+               if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+                       ret_val = -1;
+               goto out;
+       }
+
+       /* the pf is talking, if we timed out in the past we reinit */
+       if (!mbx->timeout) {
+               ret_val = -1;
+               goto out;
+       }
+
+       /* if we passed all the tests above then the link is up and we no
+        * longer need to check for link */
+       mac->get_link_status = false;
+
+out:
+       *link_up = !mac->get_link_status;
+       return ret_val;
+}
+
+/**
+ *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+       u32 msgbuf[2];
+
+       msgbuf[0] = IXGBE_VF_SET_LPE;
+       msgbuf[1] = max_size;
+       ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ *  ixgbevf_negotiate_api_version - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+       int err;
+       u32 msg[3];
+
+       /* Negotiate the mailbox API version */
+       msg[0] = IXGBE_VF_API_NEGOTIATE;
+       msg[1] = api;
+       msg[2] = 0;
+       err = hw->mbx.ops.write_posted(hw, msg, 3);
+
+       if (!err)
+               err = hw->mbx.ops.read_posted(hw, msg, 3);
+
+       if (!err) {
+               msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+               /* Store value and return 0 on success */
+               if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+                       hw->api_version = api;
+                       return 0;
+               }
+
+               err = IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       return err;
 }
 
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
index 25c951daee5d3042c85e54b9944d0fda3fb855a0..47f11a584d8c04f9f4deaeecc643729362ea6917 100644 (file)
@@ -137,6 +137,8 @@ struct ixgbe_hw {
 
        u8  revision_id;
        bool adapter_stopped;
+
+       int api_version;
 };
 
 struct ixgbevf_hw_stats {
@@ -170,5 +172,7 @@ struct ixgbevf_info {
        const struct ixgbe_mac_operations *mac_ops;
 };
 
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 #endif /* __IXGBE_VF_H__ */
 
index 10bba09c44ea508d047123aeafb5e001b6bbd1be..c10e3a6de09f042a02be933d8aa1ca928207fe6d 100644 (file)
@@ -712,10 +712,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (bounce)
                tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
-       /* Run destructor before passing skb to HW */
-       if (likely(!skb_shared(skb)))
-               skb_orphan(skb);
-
        if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
                *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
                op_own |= htonl((bf_index & 0xffff) << 8);
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
deleted file mode 100644 (file)
index db5285b..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <asm/mips-boards/simint.h>
-
-#define MIPSNET_VERSION "2007-11-17"
-
-/*
- * Net status/control block as seen by sw in the core.
- */
-struct mipsnet_regs {
-       /*
-        * Device info for probing, reads as MIPSNET%d where %d is some
-        * form of version.
-        */
-       u64 devId;              /*0x00 */
-
-       /*
-        * read only busy flag.
-        * Set and cleared by the Net Device to indicate that an rx or a tx
-        * is in progress.
-        */
-       u32 busy;               /*0x08 */
-
-       /*
-        * Set by the Net Device.
-        * The device will set it once data has been received.
-        * The value is the number of bytes that should be read from
-        * rxDataBuffer.  The value will decrease till 0 until all the data
-        * from rxDataBuffer has been read.
-        */
-       u32 rxDataCount;        /*0x0c */
-#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
-
-       /*
-        * Settable from the MIPS core, cleared by the Net Device.
-        * The core should set the number of bytes it wants to send,
-        * then it should write those bytes of data to txDataBuffer.
-        * The device will clear txDataCount has been processed (not
-        * necessarily sent).
-        */
-       u32 txDataCount;        /*0x10 */
-
-       /*
-        * Interrupt control
-        *
-        * Used to clear the interrupted generated by this dev.
-        * Write a 1 to clear the interrupt. (except bit31).
-        *
-        * Bit0 is set if it was a tx-done interrupt.
-        * Bit1 is set when new rx-data is available.
-        *    Until this bit is cleared there will be no other RXs.
-        *
-        * Bit31 is used for testing, it clears after a read.
-        *    Writing 1 to this bit will cause an interrupt to be generated.
-        *    To clear the test interrupt, write 0 to this register.
-        */
-       u32 interruptControl;   /*0x14 */
-#define MIPSNET_INTCTL_TXDONE     (1u << 0)
-#define MIPSNET_INTCTL_RXDONE     (1u << 1)
-#define MIPSNET_INTCTL_TESTBIT    (1u << 31)
-
-       /*
-        * Readonly core-specific interrupt info for the device to signal
-        * the core. The meaning of the contents of this field might change.
-        */
-       /* XXX: the whole memIntf interrupt scheme is messy: the device
-        * should have no control what so ever of what VPE/register set is
-        * being used.
-        * The MemIntf should only expose interrupt lines, and something in
-        * the config should be responsible for the line<->core/vpe bindings.
-        */
-       u32 interruptInfo;      /*0x18 */
-
-       /*
-        * This is where the received data is read out.
-        * There is more data to read until rxDataReady is 0.
-        * Only 1 byte at this regs offset is used.
-        */
-       u32 rxDataBuffer;       /*0x1c */
-
-       /*
-        * This is where the data to transmit is written.
-        * Data should be written for the amount specified in the
-        * txDataCount register.
-        * Only 1 byte at this regs offset is used.
-        */
-       u32 txDataBuffer;       /*0x20 */
-};
-
-#define regaddr(dev, field) \
-  (dev->base_addr + offsetof(struct mipsnet_regs, field))
-
-static char mipsnet_string[] = "mipsnet";
-
-/*
- * Copy data from the MIPSNET rx data port
- */
-static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
-                       int len)
-{
-       for (; len > 0; len--, kdata++)
-               *kdata = inb(regaddr(dev, rxDataBuffer));
-
-       return inl(regaddr(dev, rxDataCount));
-}
-
-static inline void mipsnet_put_todevice(struct net_device *dev,
-       struct sk_buff *skb)
-{
-       int count_to_go = skb->len;
-       char *buf_ptr = skb->data;
-
-       outl(skb->len, regaddr(dev, txDataCount));
-
-       for (; count_to_go; buf_ptr++, count_to_go--)
-               outb(*buf_ptr, regaddr(dev, txDataBuffer));
-
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
-       dev_kfree_skb(skb);
-}
-
-static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       /*
-        * Only one packet at a time. Once TXDONE interrupt is serviced, the
-        * queue will be restarted.
-        */
-       netif_stop_queue(dev);
-       mipsnet_put_todevice(dev, skb);
-
-       return NETDEV_TX_OK;
-}
-
-static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
-{
-       struct sk_buff *skb;
-
-       if (!len)
-               return len;
-
-       skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
-       if (!skb) {
-               dev->stats.rx_dropped++;
-               return -ENOMEM;
-       }
-
-       skb_reserve(skb, NET_IP_ALIGN);
-       if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
-               return -EFAULT;
-
-       skb->protocol = eth_type_trans(skb, dev);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       netif_rx(skb);
-
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += len;
-
-       return len;
-}
-
-static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       u32 int_flags;
-       irqreturn_t ret = IRQ_NONE;
-
-       if (irq != dev->irq)
-               goto out_badirq;
-
-       /* TESTBIT is cleared on read. */
-       int_flags = inl(regaddr(dev, interruptControl));
-       if (int_flags & MIPSNET_INTCTL_TESTBIT) {
-               /* TESTBIT takes effect after a write with 0. */
-               outl(0, regaddr(dev, interruptControl));
-               ret = IRQ_HANDLED;
-       } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
-               /* Only one packet at a time, we are done. */
-               dev->stats.tx_packets++;
-               netif_wake_queue(dev);
-               outl(MIPSNET_INTCTL_TXDONE,
-                    regaddr(dev, interruptControl));
-               ret = IRQ_HANDLED;
-       } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
-               mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
-               outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
-               ret = IRQ_HANDLED;
-       }
-       return ret;
-
-out_badirq:
-       printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
-              dev->name, __func__, irq);
-       return ret;
-}
-
-static int mipsnet_open(struct net_device *dev)
-{
-       int err;
-
-       err = request_irq(dev->irq, mipsnet_interrupt,
-                         IRQF_SHARED, dev->name, (void *) dev);
-       if (err) {
-               release_region(dev->base_addr, sizeof(struct mipsnet_regs));
-               return err;
-       }
-
-       netif_start_queue(dev);
-
-       /* test interrupt handler */
-       outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
-
-       return 0;
-}
-
-static int mipsnet_close(struct net_device *dev)
-{
-       netif_stop_queue(dev);
-       free_irq(dev->irq, dev);
-       return 0;
-}
-
-static void mipsnet_set_mclist(struct net_device *dev)
-{
-}
-
-static const struct net_device_ops mipsnet_netdev_ops = {
-       .ndo_open               = mipsnet_open,
-       .ndo_stop               = mipsnet_close,
-       .ndo_start_xmit         = mipsnet_xmit,
-       .ndo_set_rx_mode        = mipsnet_set_mclist,
-       .ndo_change_mtu         = eth_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
-};
-
-static int __devinit mipsnet_probe(struct platform_device *dev)
-{
-       struct net_device *netdev;
-       int err;
-
-       netdev = alloc_etherdev(0);
-       if (!netdev) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       platform_set_drvdata(dev, netdev);
-
-       netdev->netdev_ops = &mipsnet_netdev_ops;
-
-       /*
-        * TODO: probe for these or load them from PARAM
-        */
-       netdev->base_addr = 0x4200;
-       netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
-                     inl(regaddr(netdev, interruptInfo));
-
-       /* Get the io region now, get irq on open() */
-       if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
-                           "mipsnet")) {
-               err = -EBUSY;
-               goto out_free_netdev;
-       }
-
-       /*
-        * Lacking any better mechanism to allocate a MAC address we use a
-        * random one ...
-        */
-       eth_hw_addr_random(netdev);
-
-       err = register_netdev(netdev);
-       if (err) {
-               printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
-               goto out_free_region;
-       }
-
-       return 0;
-
-out_free_region:
-       release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
-
-out_free_netdev:
-       free_netdev(netdev);
-
-out:
-       return err;
-}
-
-static int __devexit mipsnet_device_remove(struct platform_device *device)
-{
-       struct net_device *dev = platform_get_drvdata(device);
-
-       unregister_netdev(dev);
-       release_region(dev->base_addr, sizeof(struct mipsnet_regs));
-       free_netdev(dev);
-       platform_set_drvdata(device, NULL);
-
-       return 0;
-}
-
-static struct platform_driver mipsnet_driver = {
-       .driver = {
-               .name           = mipsnet_string,
-               .owner          = THIS_MODULE,
-       },
-       .probe          = mipsnet_probe,
-       .remove         = __devexit_p(mipsnet_device_remove),
-};
-
-static int __init mipsnet_init_module(void)
-{
-       int err;
-
-       printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
-              "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
-
-       err = platform_driver_register(&mipsnet_driver);
-       if (err)
-               printk(KERN_ERR "Driver registration failed\n");
-
-       return err;
-}
-
-static void __exit mipsnet_exit_module(void)
-{
-       platform_driver_unregister(&mipsnet_driver);
-}
-
-module_init(mipsnet_init_module);
-module_exit(mipsnet_exit_module);
index f45def01a98e46333873ee05366ad206f54b4fc9..876beceaf2d7154f07d9de46bbb75fba3b1f0c43 100644 (file)
@@ -3409,7 +3409,7 @@ set_speed:
 
        pause_flags = 0;
        /* setup pause frame */
-       if (np->duplex != 0) {
+       if (netif_running(dev) && (np->duplex != 0)) {
                if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
                        adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                        lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
        regs->version = FORCEDETH_REGS_VER;
        spin_lock_irq(&np->lock);
-       for (i = 0; i <= np->register_size/sizeof(u32); i++)
+       for (i = 0; i < np->register_size/sizeof(u32); i++)
                rbuf[i] = readl(base + i*sizeof(u32));
        spin_unlock_irq(&np->lock);
 }
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
 
        netif_stop_queue(dev);
        spin_lock_irq(&np->lock);
+       nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
        nv_stop_rxtx(dev);
        nv_txrx_reset(dev);
 
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                goto out_error;
        }
 
+       netif_carrier_off(dev);
+
+       /* Some NICs freeze when TX pause is enabled while NIC is
+        * down, and this stays across warm reboots. The sequence
+        * below should be enough to recover from that state.
+        */
+       nv_update_pause(dev, 0);
+       nv_start_tx(dev);
+       nv_stop_tx(dev);
+
        if (id->driver_data & DEV_HAS_VLAN)
                nv_vlan_mode(dev, dev->features);
 
-       netif_carrier_off(dev);
-
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
index a7cc56007b330457c53b49ef4c51145d4990221d..e7ff886e8047ac3d3a926e8c0384b3ec7568068b 100644 (file)
@@ -77,7 +77,7 @@
 static const int multicast_filter_limit = 32;
 
 #define MAX_READ_REQUEST_SHIFT 12
-#define TX_DMA_BURST   6       /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST   7       /* Maximum PCI burst, '7' is unlimited */
 #define SafeMtu                0x1c20  /* ... actually life sucks beyond ~7k */
 #define InterFrameGap  0x03    /* 3 means InterFrameGap = the shortest one */
 
@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
+       { PCI_VENDOR_ID_DLINK,                  0x4300,
+               PCI_VENDOR_ID_DLINK, 0x4b10,             0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4300), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4302), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_AT,          0xc107), 0, 0, RTL_CFG_0 },
index fb3cbc27063cccb5ae821989c516195527e32e65..25906c1d1b1590825502bc192dffa1aa8d8e494e 100644 (file)
@@ -34,3 +34,10 @@ config SFC_SRIOV
          This enables support for the SFC9000 I/O Virtualization
          features, allowing accelerated network performance in
          virtualized environments.
+config SFC_PTP
+       bool "Solarflare SFC9000-family PTP support"
+       depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
+       default y
+       ---help---
+         This enables support for the Precision Time Protocol (PTP)
+         on SFC9000-family NICs
index ea1f8db5731811cbe7c7005931b1c4e4e8db5593..e11f2ecf69d9b5dfd0ae5600e33bb0fe1d7382fc 100644 (file)
@@ -5,5 +5,6 @@ sfc-y                   += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
                           mcdi.o mcdi_phy.o mcdi_mon.o
 sfc-$(CONFIG_SFC_MTD)  += mtd.o
 sfc-$(CONFIG_SFC_SRIOV)        += siena_sriov.o
+sfc-$(CONFIG_SFC_PTP)  += ptp.o
 
 obj-$(CONFIG_SFC)      += sfc.o
index b26a954c27fcba84764e4c4310e5dcb91dd17a61..5400a33f254f0d8727dd882036b033305eba0d9d 100644 (file)
@@ -120,10 +120,10 @@ typedef union efx_oword {
  * [0,high-low), with garbage in bits [high-low+1,...).
  */
 #define EFX_EXTRACT_NATIVE(native_element, min, max, low, high)                \
-       (((low > max) || (high < min)) ? 0 :                            \
-        ((low > min) ?                                                 \
-         ((native_element) >> (low - min)) :                           \
-         ((native_element) << (min - low))))
+       ((low) > (max) || (high) < (min) ? 0 :                          \
+        (low) > (min) ?                                                \
+        (native_element) >> ((low) - (min)) :                          \
+        (native_element) << ((min) - (low)))
 
 /*
  * Extract bit field portion [low,high) from the 64-bit little-endian
@@ -142,27 +142,27 @@ typedef union efx_oword {
 #define EFX_EXTRACT_OWORD64(oword, low, high)                          \
        ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |             \
          EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) &          \
-        EFX_MASK64(high + 1 - low))
+        EFX_MASK64((high) + 1 - (low)))
 
 #define EFX_EXTRACT_QWORD64(qword, low, high)                          \
        (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) &              \
-        EFX_MASK64(high + 1 - low))
+        EFX_MASK64((high) + 1 - (low)))
 
 #define EFX_EXTRACT_OWORD32(oword, low, high)                          \
        ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |             \
          EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |            \
          EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |            \
          EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) &          \
-        EFX_MASK32(high + 1 - low))
+        EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_EXTRACT_QWORD32(qword, low, high)                          \
        ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |             \
          EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) &           \
-        EFX_MASK32(high + 1 - low))
+        EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_EXTRACT_DWORD(dword, low, high)                    \
        (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) &      \
-        EFX_MASK32(high + 1 - low))
+        EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_OWORD_FIELD64(oword, field)                                \
        EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field),          \
@@ -442,10 +442,10 @@ typedef union efx_oword {
        cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
 
 #define EFX_INPLACE_MASK64(min, max, low, high)                                \
-       EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
+       EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
 
 #define EFX_INPLACE_MASK32(min, max, low, high)                                \
-       EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
+       EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_SET_OWORD64(oword, low, high, value) do {                  \
        (oword).u64[0] = (((oword).u64[0]                               \
index 65a8d49106a4c63c6a7faf04c8e3334421ce6472..96bd980e828da5d28b0bed6329cb85e3655d598f 100644 (file)
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)               \
        do {                                            \
-               if ((efx->state == STATE_RUNNING) ||    \
+               if ((efx->state == STATE_READY) ||      \
                    (efx->state == STATE_DISABLED))     \
                        ASSERT_RTNL();                  \
        } while (0)
 
+static int efx_check_disabled(struct efx_nic *efx)
+{
+       if (efx->state == STATE_DISABLED) {
+               netif_err(efx, drv, efx->net_dev,
+                         "device is disabled due to earlier errors\n");
+               return -EIO;
+       }
+       return 0;
+}
+
 /**************************************************************************
  *
  * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
        efx->rx_buffer_order = get_order(efx->rx_buffer_len +
                                         sizeof(struct efx_rx_page_state));
 
+       /* We must keep at least one descriptor in a TX ring empty.
+        * We could avoid this when the queue size does not exactly
+        * match the hardware ring size, but it's not that important.
+        * Therefore we stop the queue when one more skb might fill
+        * the ring completely.  We wake it when half way back to
+        * empty.
+        */
+       efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+       efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -714,6 +734,7 @@ static void efx_remove_channel(struct efx_channel *channel)
        efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
+       channel->type->post_remove(channel);
 }
 
 static void efx_remove_channels(struct efx_nic *efx)
@@ -730,7 +751,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
        struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
        u32 old_rxq_entries, old_txq_entries;
        unsigned i, next_buffer_table = 0;
-       int rc = 0;
+       int rc;
+
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
 
        /* Not all channels should be reallocated. We must avoid
         * reallocating their buffer table entries.
@@ -828,6 +853,7 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 
 static const struct efx_channel_type efx_default_channel_type = {
        .pre_probe              = efx_channel_dummy_op_int,
+       .post_remove            = efx_channel_dummy_op_void,
        .get_name               = efx_get_channel_name,
        .copy                   = efx_copy_channel,
        .keep_eventq            = false,
@@ -838,6 +864,10 @@ int efx_channel_dummy_op_int(struct efx_channel *channel)
        return 0;
 }
 
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
 /**************************************************************************
  *
  * Port handling
@@ -1365,6 +1395,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
        struct efx_channel *channel;
 
+       BUG_ON(efx->state == STATE_DISABLED);
+
        if (efx->legacy_irq)
                efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
@@ -1382,6 +1414,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
        struct efx_channel *channel;
 
+       if (efx->state == STATE_DISABLED)
+               return;
+
        efx_mcdi_mode_poll(efx);
 
        efx_nic_disable_interrupts(efx);
@@ -1422,10 +1457,16 @@ static void efx_set_channels(struct efx_nic *efx)
        efx->tx_channel_offset =
                separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
-       /* We need to adjust the TX queue numbers if we have separate
+       /* We need to mark which channels really have RX and TX
+        * queues, and adjust the TX queue numbers if we have separate
         * RX-only and TX-only channels.
         */
        efx_for_each_channel(channel, efx) {
+               if (channel->channel < efx->n_rx_channels)
+                       channel->rx_queue.core_index = channel->channel;
+               else
+                       channel->rx_queue.core_index = -1;
+
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        tx_queue->queue -= (efx->tx_channel_offset *
                                            EFX_TXQ_TYPES);
@@ -1533,22 +1574,21 @@ static int efx_probe_all(struct efx_nic *efx)
        return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
  */
 static void efx_start_all(struct efx_nic *efx)
 {
        EFX_ASSERT_RESET_SERIALISED(efx);
+       BUG_ON(efx->state == STATE_DISABLED);
 
        /* Check that it is appropriate to restart the interface. All
         * of these flags are safe to read under just the rtnl lock */
-       if (efx->port_enabled)
-               return;
-       if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
-               return;
-       if (!netif_running(efx->net_dev))
+       if (efx->port_enabled || !netif_running(efx->net_dev))
                return;
 
        efx_start_port(efx);
@@ -1582,11 +1622,11 @@ static void efx_flush_all(struct efx_nic *efx)
        cancel_work_sync(&efx->mac_work);
 }
 
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
 static void efx_stop_all(struct efx_nic *efx)
 {
        EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,7 +1779,8 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
        struct efx_nic *efx = netdev_priv(net_dev);
        struct mii_ioctl_data *data = if_mii(ifr);
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
+       if (cmd == SIOCSHWTSTAMP)
+               return efx_ptp_ioctl(efx, ifr, cmd);
 
        /* Convert phy_id from older PRTAD/DEVAD format */
        if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -1820,13 +1861,14 @@ static void efx_netpoll(struct net_device *net_dev)
 static int efx_net_open(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       EFX_ASSERT_RESET_SERIALISED(efx);
+       int rc;
 
        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());
 
-       if (efx->state == STATE_DISABLED)
-               return -EIO;
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1894,8 @@ static int efx_net_stop(struct net_device *net_dev)
        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());
 
-       if (efx->state != STATE_DISABLED) {
-               /* Stop the device and flush all the channels */
-               efx_stop_all(efx);
-       }
+       /* Stop the device and flush all the channels */
+       efx_stop_all(efx);
 
        return 0;
 }
@@ -1915,9 +1955,11 @@ static void efx_watchdog(struct net_device *net_dev)
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;
 
@@ -1926,8 +1968,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
        mutex_lock(&efx->mac_lock);
-       /* Reconfigure the MAC before enabling the dma queues so that
-        * the RX buffers don't overflow */
        net_dev->mtu = new_mtu;
        efx->type->reconfigure_mac(efx);
        mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1982,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
        struct sockaddr *addr = data;
        char *new_addr = addr->sa_data;
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
                          "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2117,27 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        rtnl_lock();
 
+       /* Enable resets to be scheduled and check whether any were
+        * already requested.  If so, the NIC is probably hosed so we
+        * abort.
+        */
+       efx->state = STATE_READY;
+       smp_mb(); /* ensure we change state before checking reset_pending */
+       if (efx->reset_pending) {
+               netif_err(efx, probe, efx->net_dev,
+                         "aborting probe due to scheduled reset\n");
+               rc = -EIO;
+               goto fail_locked;
+       }
+
        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);
 
+       /* Always start with carrier off; PHY events will detect the link */
+       netif_carrier_off(net_dev);
+
        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;
@@ -2094,9 +2148,6 @@ static int efx_register_netdev(struct efx_nic *efx)
                        efx_init_tx_queue_core_txq(tx_queue);
        }
 
-       /* Always start with carrier off; PHY events will detect the link */
-       netif_carrier_off(net_dev);
-
        rtnl_unlock();
 
        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2159,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        return 0;
 
+fail_registered:
+       rtnl_lock();
+       unregister_netdevice(net_dev);
 fail_locked:
+       efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
-
-fail_registered:
-       unregister_netdev(net_dev);
-       return rc;
 }
 
 static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2189,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 
        strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
        device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-       unregister_netdev(efx->net_dev);
+
+       rtnl_lock();
+       unregister_netdevice(efx->net_dev);
+       efx->state = STATE_UNINIT;
+       rtnl_unlock();
 }
 
 /**************************************************************************
@@ -2154,9 +2209,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        EFX_ASSERT_RESET_SERIALISED(efx);
 
        efx_stop_all(efx);
-       mutex_lock(&efx->mac_lock);
-
        efx_stop_interrupts(efx, false);
+
+       mutex_lock(&efx->mac_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
                efx->phy_op->fini(efx);
        efx->type->fini(efx);
@@ -2276,16 +2331,15 @@ static void efx_reset_work(struct work_struct *data)
        if (!pending)
                return;
 
-       /* If we're not RUNNING then don't reset. Leave the reset_pending
-        * flags set so that efx_pci_probe_main will be retried */
-       if (efx->state != STATE_RUNNING) {
-               netif_info(efx, drv, efx->net_dev,
-                          "scheduled reset quenched. NIC not RUNNING\n");
-               return;
-       }
-
        rtnl_lock();
-       (void)efx_reset(efx, fls(pending) - 1);
+
+       /* We checked the state in efx_schedule_reset() but it may
+        * have changed by now.  Now that we have the RTNL lock,
+        * it cannot change again.
+        */
+       if (efx->state == STATE_READY)
+               (void)efx_reset(efx, fls(pending) - 1);
+
        rtnl_unlock();
 }
 
@@ -2311,6 +2365,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
        }
 
        set_bit(method, &efx->reset_pending);
+       smp_mb(); /* ensure we change reset_pending before checking state */
+
+       /* If we're not READY then just leave the flags set as the cue
+        * to abort probing or reschedule the reset later.
+        */
+       if (ACCESS_ONCE(efx->state) != STATE_READY)
+               return;
 
        /* efx_process_channel() will no longer read events once a
         * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2437,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
 {
        int i;
 
        /* Initialise common structures */
-       memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
        INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2452,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
        INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
        efx->pci_dev = pci_dev;
        efx->msg_enable = debug;
-       efx->state = STATE_INIT;
+       efx->state = STATE_UNINIT;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
        efx->net_dev = net_dev;
@@ -2409,8 +2469,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
                        goto fail;
        }
 
-       efx->type = type;
-
        EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
        /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2513,12 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+       /* Flush reset_work. It can no longer be scheduled since we
+        * are not READY.
+        */
+       BUG_ON(efx->state == STATE_READY);
+       cancel_work_sync(&efx->reset_work);
+
 #ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
        efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2544,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
-       efx->state = STATE_FINI;
        dev_close(efx->net_dev);
-
-       /* Allow any queued efx_resets() to complete */
+       efx_stop_interrupts(efx, false);
        rtnl_unlock();
 
-       efx_stop_interrupts(efx, false);
        efx_sriov_fini(efx);
        efx_unregister_netdev(efx);
 
        efx_mtd_remove(efx);
 
-       /* Wait for any scheduled resets to complete. No more will be
-        * scheduled from this point because efx_stop_all() has been
-        * called, we are no longer registered with driverlink, and
-        * the net_device's have been removed. */
-       cancel_work_sync(&efx->reset_work);
-
        efx_pci_remove_main(efx);
 
        efx_fini_io(efx);
@@ -2617,7 +2672,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
 {
-       const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;
@@ -2627,10 +2681,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                     EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
-       net_dev->features |= (type->offload_features | NETIF_F_SG |
+       efx = netdev_priv(net_dev);
+       efx->type = (const struct efx_nic_type *) entry->driver_data;
+       net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO |
                              NETIF_F_RXCSUM);
-       if (type->offload_features & NETIF_F_V6_CSUM)
+       if (efx->type->offload_features & NETIF_F_V6_CSUM)
                net_dev->features |= NETIF_F_TSO6;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2694,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   NETIF_F_RXCSUM);
        /* All offloads can be toggled */
        net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
-       efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
-       rc = efx_init_struct(efx, type, pci_dev, net_dev);
+       rc = efx_init_struct(efx, pci_dev, net_dev);
        if (rc)
                goto fail1;
 
@@ -2656,28 +2711,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                goto fail2;
 
        rc = efx_pci_probe_main(efx);
-
-       /* Serialise against efx_reset(). No more resets will be
-        * scheduled since efx_stop_all() has been called, and we have
-        * not and never have been registered.
-        */
-       cancel_work_sync(&efx->reset_work);
-
        if (rc)
                goto fail3;
 
-       /* If there was a scheduled reset during probe, the NIC is
-        * probably hosed anyway.
-        */
-       if (efx->reset_pending) {
-               rc = -EIO;
-               goto fail4;
-       }
-
-       /* Switch to the running state before we expose the device to the OS,
-        * so that dev_open()|efx_start_all() will actually start the device */
-       efx->state = STATE_RUNNING;
-
        rc = efx_register_netdev(efx);
        if (rc)
                goto fail4;
@@ -2717,12 +2753,18 @@ static int efx_pm_freeze(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-       efx->state = STATE_FINI;
+       rtnl_lock();
 
-       netif_device_detach(efx->net_dev);
+       if (efx->state != STATE_DISABLED) {
+               efx->state = STATE_UNINIT;
 
-       efx_stop_all(efx);
-       efx_stop_interrupts(efx, false);
+               netif_device_detach(efx->net_dev);
+
+               efx_stop_all(efx);
+               efx_stop_interrupts(efx, false);
+       }
+
+       rtnl_unlock();
 
        return 0;
 }
@@ -2731,21 +2773,25 @@ static int efx_pm_thaw(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-       efx->state = STATE_INIT;
+       rtnl_lock();
 
-       efx_start_interrupts(efx, false);
+       if (efx->state != STATE_DISABLED) {
+               efx_start_interrupts(efx, false);
 
-       mutex_lock(&efx->mac_lock);
-       efx->phy_op->reconfigure(efx);
-       mutex_unlock(&efx->mac_lock);
+               mutex_lock(&efx->mac_lock);
+               efx->phy_op->reconfigure(efx);
+               mutex_unlock(&efx->mac_lock);
 
-       efx_start_all(efx);
+               efx_start_all(efx);
 
-       netif_device_attach(efx->net_dev);
+               netif_device_attach(efx->net_dev);
 
-       efx->state = STATE_RUNNING;
+               efx->state = STATE_READY;
 
-       efx->type->resume_wol(efx);
+               efx->type->resume_wol(efx);
+       }
+
+       rtnl_unlock();
 
        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        queue_work(reset_workqueue, &efx->reset_work);
index 70755c97251aaab4e9cafe969b0a8b95821a1cbb..f11170bc48bf4f292d38c4ac5c31a55db10499ee 100644 (file)
@@ -102,6 +102,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 
 /* Channels */
 extern int efx_channel_dummy_op_int(struct efx_channel *channel);
+extern void efx_channel_dummy_op_void(struct efx_channel *channel);
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern int
 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
index 5faedd855b779272342b37c6d992a1a93562a70b..90f078eff8e60d86b491763c7a2b40edf7012052 100644 (file)
@@ -337,7 +337,8 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
                                  unsigned int test_index,
                                  struct ethtool_string *strings, u64 *data)
 {
-       struct efx_channel *channel = efx_get_channel(efx, 0);
+       struct efx_channel *channel =
+               efx_get_channel(efx, efx->tx_channel_offset);
        struct efx_tx_queue *tx_queue;
 
        efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -529,9 +530,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
        if (!efx_tests)
                goto fail;
 
-
-       ASSERT_RTNL();
-       if (efx->state != STATE_RUNNING) {
+       if (efx->state != STATE_READY) {
                rc = -EIO;
                goto fail1;
        }
@@ -962,9 +961,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
        int rc;
 
        /* Check that user wants us to choose the location */
-       if (rule->location != RX_CLS_LOC_ANY &&
-           rule->location != RX_CLS_LOC_FIRST &&
-           rule->location != RX_CLS_LOC_LAST)
+       if (rule->location != RX_CLS_LOC_ANY)
                return -EINVAL;
 
        /* Range-check ring_cookie */
@@ -978,9 +975,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
             rule->m_ext.data[1]))
                return -EINVAL;
 
-       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
-                          (rule->location == RX_CLS_LOC_FIRST) ?
-                          EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
                           (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
                           0xfff : rule->ring_cookie);
 
@@ -1176,6 +1171,7 @@ const struct ethtool_ops efx_ethtool_ops = {
        .get_rxfh_indir_size    = efx_ethtool_get_rxfh_indir_size,
        .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
        .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
+       .get_ts_info            = efx_ptp_get_ts_info,
        .get_module_info        = efx_ethtool_get_module_info,
        .get_module_eeprom      = efx_ethtool_get_module_eeprom,
 };
index 8687a6c3db0dc19cb11ecbe5327db5e00f869094..ec1e99d0dcad9e32f038f8b436e2416ac6d2ff4e 100644 (file)
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
                new_mode = PHY_MODE_SPECIAL;
        if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
                err = 0;
-       } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+       } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
                err = -EBUSY;
        } else {
                /* Reset the PHY, reconfigure the MAC and enable/disable
index c3fd61f0a95c5d0680a1a286f48000d82021b46b..8af42cd1feda7d7dddbd860b76cdddbf69ff01fa 100644 (file)
@@ -161,10 +161,6 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
                        filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
                        !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
                           EFX_FILTER_FLAG_RX_RSS));
-               EFX_SET_OWORD_FIELD(
-                       filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
-                       !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
-                          EFX_FILTER_FLAG_RX_OVERRIDE_IP));
                EFX_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
                        table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
@@ -172,10 +168,6 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
                        !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
                           EFX_FILTER_FLAG_RX_RSS));
-               EFX_SET_OWORD_FIELD(
-                       filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
-                       !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
-                          EFX_FILTER_FLAG_RX_OVERRIDE_IP));
        }
 
        efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -480,14 +472,12 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
 
        case EFX_FILTER_TABLE_RX_MAC: {
                bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
-               EFX_POPULATE_OWORD_8(
+               EFX_POPULATE_OWORD_7(
                        *filter,
                        FRF_CZ_RMFT_RSS_EN,
                        !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
                        FRF_CZ_RMFT_SCATTER_EN,
                        !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
-                       FRF_CZ_RMFT_IP_OVERRIDE,
-                       !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
                        FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
                        FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
                        FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
@@ -567,49 +557,62 @@ static int efx_filter_search(struct efx_filter_table *table,
 }
 
 /*
- * Construct/deconstruct external filter IDs.  These must be ordered
- * by matching priority, for RX NFC semantics.
+ * Construct/deconstruct external filter IDs.  At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
  *
- * Each RX MAC filter entry has a flag for whether it can override an
- * RX IP filter that also matches.  So we assign locations for MAC
- * filters with overriding behaviour, then for IP filters, then for
- * MAC filters without overriding behaviour.
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
  */
 
-#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP        0
-#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP        1
-#define EFX_FILTER_MATCH_PRI_NORMAL_BASE       2
+#define EFX_FILTER_MATCH_PRI_COUNT     5
+
+static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
+       [EFX_FILTER_TCP_FULL]   = 0,
+       [EFX_FILTER_UDP_FULL]   = 0,
+       [EFX_FILTER_TCP_WILD]   = 1,
+       [EFX_FILTER_UDP_WILD]   = 1,
+       [EFX_FILTER_MAC_FULL]   = 2,
+       [EFX_FILTER_MAC_WILD]   = 3,
+       [EFX_FILTER_UC_DEF]     = 4,
+       [EFX_FILTER_MC_DEF]     = 4,
+};
+
+static const enum efx_filter_table_id efx_filter_range_table[] = {
+       EFX_FILTER_TABLE_RX_IP,         /* RX match pri 0 */
+       EFX_FILTER_TABLE_RX_IP,
+       EFX_FILTER_TABLE_RX_MAC,
+       EFX_FILTER_TABLE_RX_MAC,
+       EFX_FILTER_TABLE_RX_DEF,        /* RX match pri 4 */
+       EFX_FILTER_TABLE_COUNT,         /* TX match pri 0; invalid */
+       EFX_FILTER_TABLE_COUNT,         /* invalid */
+       EFX_FILTER_TABLE_TX_MAC,
+       EFX_FILTER_TABLE_TX_MAC,        /* TX match pri 3 */
+};
 
 #define EFX_FILTER_INDEX_WIDTH 13
 #define EFX_FILTER_INDEX_MASK  ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
 
-static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
-                                    unsigned int index, u8 flags)
+static inline u32
+efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
 {
-       unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;
+       unsigned int range;
 
-       if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
-               if (table_id == EFX_FILTER_TABLE_RX_MAC)
-                       match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
-               else if (table_id == EFX_FILTER_TABLE_RX_DEF)
-                       match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
-       }
+       range = efx_filter_type_match_pri[spec->type];
+       if (!(spec->flags & EFX_FILTER_FLAG_RX))
+               range += EFX_FILTER_MATCH_PRI_COUNT;
 
-       return match_pri << EFX_FILTER_INDEX_WIDTH | index;
+       return range << EFX_FILTER_INDEX_WIDTH | index;
 }
 
 static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
 {
-       unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+       unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
 
-       switch (match_pri) {
-       case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
-               return EFX_FILTER_TABLE_RX_MAC;
-       case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
-               return EFX_FILTER_TABLE_RX_DEF;
-       default:
-               return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
-       }
+       if (range < ARRAY_SIZE(efx_filter_range_table))
+               return efx_filter_range_table[range];
+       else
+               return EFX_FILTER_TABLE_COUNT; /* invalid */
 }
 
 static inline unsigned int efx_filter_id_index(u32 id)
@@ -619,12 +622,9 @@ static inline unsigned int efx_filter_id_index(u32 id)
 
 static inline u8 efx_filter_id_flags(u32 id)
 {
-       unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+       unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
 
-       if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
-               return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
-       else if (match_pri <=
-                EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
+       if (range < EFX_FILTER_MATCH_PRI_COUNT)
                return EFX_FILTER_FLAG_RX;
        else
                return EFX_FILTER_FLAG_TX;
@@ -633,14 +633,15 @@ static inline u8 efx_filter_id_flags(u32 id)
 u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
 {
        struct efx_filter_state *state = efx->filter_state;
-       unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;
+       unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
+       enum efx_filter_table_id table_id;
 
        do {
+               table_id = efx_filter_range_table[range];
                if (state->table[table_id].size != 0)
-                       return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
-                               << EFX_FILTER_INDEX_WIDTH) +
+                       return range << EFX_FILTER_INDEX_WIDTH |
                                state->table[table_id].size;
-       } while (table_id--);
+       } while (range--);
 
        return 0;
 }
@@ -718,7 +719,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
        netif_vdbg(efx, hw, efx->net_dev,
                   "%s: filter type %d index %d rxq %u set",
                   __func__, spec->type, filter_idx, spec->dmaq_id);
-       rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
+       rc = efx_filter_make_id(spec, filter_idx);
 
 out:
        spin_unlock_bh(&state->lock);
@@ -781,8 +782,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
        spin_lock_bh(&state->lock);
 
        if (test_bit(filter_idx, table->used_bitmap) &&
-           spec->priority == priority &&
-           !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
+           spec->priority == priority) {
                efx_filter_table_clear_entry(efx, table, filter_idx);
                if (table->used == 0)
                        efx_filter_table_reset_search_depth(table);
@@ -833,8 +833,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
        spin_lock_bh(&state->lock);
 
        if (test_bit(filter_idx, table->used_bitmap) &&
-           spec->priority == priority &&
-           !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
+           spec->priority == priority) {
                *spec_buf = *spec;
                rc = 0;
        } else {
@@ -927,8 +926,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                                        goto out;
                                }
                                buf[count++] = efx_filter_make_id(
-                                       table_id, filter_idx,
-                                       table->spec[filter_idx].flags);
+                                       &table->spec[filter_idx], filter_idx);
                        }
                }
        }
index 3c77802aed6c63e02adf29ca9d2858b4d2c100c0..5cb54723b8244bf5b4c8b21e8c655f8e9f9b0916 100644 (file)
@@ -61,16 +61,12 @@ enum efx_filter_priority {
  *     according to the indirection table.
  * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
  *     queue.
- * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
- *     any IP filter that matches the same packet.  By default, IP
- *     filters take precedence.
  * @EFX_FILTER_FLAG_RX: Filter is for RX
  * @EFX_FILTER_FLAG_TX: Filter is for TX
  */
 enum efx_filter_flags {
        EFX_FILTER_FLAG_RX_RSS = 0x01,
        EFX_FILTER_FLAG_RX_SCATTER = 0x02,
-       EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
        EFX_FILTER_FLAG_RX = 0x08,
        EFX_FILTER_FLAG_TX = 0x10,
 };
@@ -88,8 +84,7 @@ enum efx_filter_flags {
  *
  * The @priority field is used by software to determine whether a new
  * filter may replace an old one.  The hardware priority of a filter
- * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
- * flag.
+ * depends on the filter type.
  */
 struct efx_filter_spec {
        u8      type:4;
index fc5e7bbcbc9e5b9ea2ff8cf61eee36703371865d..aea43cbd05200acc1f19cd21e9f96d9e26782ca2 100644 (file)
@@ -320,14 +320,20 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
                efx_mcdi_complete(mcdi);
 }
 
-/* Issue the given command by writing the data into the shared memory PDU,
- * ring the doorbell and wait for completion. Copyout the result. */
 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
                 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
                 size_t *outlen_actual)
+{
+       efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+       return efx_mcdi_rpc_finish(efx, cmd, inlen,
+                                  outbuf, outlen, outlen_actual);
+}
+
+void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
+                       size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       int rc;
+
        BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
 
        efx_mcdi_acquire(mcdi);
@@ -338,6 +344,15 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
        spin_unlock_bh(&mcdi->iface_lock);
 
        efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+}
+
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+                       u8 *outbuf, size_t outlen, size_t *outlen_actual)
+{
+       struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+       int rc;
+
+       BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
 
        if (mcdi->mode == MCDI_MODE_POLL)
                rc = efx_mcdi_poll(efx);
@@ -563,6 +578,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
        case MCDI_EVENT_CODE_FLR:
                efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
                break;
+       case MCDI_EVENT_CODE_PTP_RX:
+       case MCDI_EVENT_CODE_PTP_FAULT:
+       case MCDI_EVENT_CODE_PTP_PPS:
+               efx_ptp_event(efx, event);
+               break;
 
        default:
                netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -641,9 +661,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
                           u16 *fw_subtype_list, u32 *capabilities)
 {
        uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
-       size_t outlen;
+       size_t outlen, offset, i;
        int port_num = efx_port_num(efx);
-       int offset;
        int rc;
 
        BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
@@ -663,11 +682,18 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
                : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
        if (mac_address)
                memcpy(mac_address, outbuf + offset, ETH_ALEN);
-       if (fw_subtype_list)
-               memcpy(fw_subtype_list,
-                      outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
-                      MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
-                      sizeof(fw_subtype_list[0]));
+       if (fw_subtype_list) {
+               /* Byte-swap and truncate or zero-pad as necessary */
+               offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
+               for (i = 0;
+                    i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
+                    i++) {
+                       fw_subtype_list[i] =
+                               (offset + 2 <= outlen) ?
+                               le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
+                       offset += 2;
+               }
+       }
        if (capabilities) {
                if (port_num)
                        *capabilities = MCDI_DWORD(outbuf,
@@ -1169,6 +1195,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
        __le32 *qid;
        int rc, count;
 
+       BUILD_BUG_ON(EFX_MAX_CHANNELS >
+                    MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
        qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
        if (qid == NULL)
                return -ENOMEM;
index 0bdf3e33183253d19803803cdf73d7519c9bdaf6..3ba2e5b5a9cc98eac81a90c9b3d2e0e650b63af0 100644 (file)
@@ -71,6 +71,12 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
                        size_t inlen, u8 *outbuf, size_t outlen,
                        size_t *outlen_actual);
 
+extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                              const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+                              u8 *outbuf, size_t outlen,
+                              size_t *outlen_actual);
+
 extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
 extern void efx_mcdi_mode_poll(struct efx_nic *efx);
 extern void efx_mcdi_mode_event(struct efx_nic *efx);
@@ -107,11 +113,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2)                \
-       EFX_DWORD_FIELD(                                                \
+       EFX_EXTRACT_DWORD(                                              \
                *((efx_dword_t *)                                       \
                  (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) +       \
                   (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
-               MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
+               MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
+               (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
+               MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
 
 extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
 extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
index db4beed97669c94a299d0ec6b96069d969e26b1e..9d426d0457bdd5140709b1da8186d817d5b2bbd2 100644 (file)
 #define          MCDI_EVENT_CODE_TX_FLUSH  0xc /* enum */
 #define          MCDI_EVENT_CODE_PTP_RX  0xd /* enum */
 #define          MCDI_EVENT_CODE_PTP_FAULT  0xe /* enum */
+#define          MCDI_EVENT_CODE_PTP_PPS  0xf /* enum */
 #define       MCDI_EVENT_CMDDONE_DATA_OFST 0
 #define       MCDI_EVENT_CMDDONE_DATA_LBN 0
 #define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
 
 /* MC_CMD_GET_FPGAREG_OUT msgresponse */
 #define    MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define    MC_CMD_GET_FPGAREG_OUT_LENMAX 255
+#define    MC_CMD_GET_FPGAREG_OUT_LENMAX 252
 #define    MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
 #define       MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
 #define       MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
 #define       MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255
+#define       MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
 
 
 /***********************************/
 
 /* MC_CMD_PUT_FPGAREG_IN msgrequest */
 #define    MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define    MC_CMD_PUT_FPGAREG_IN_LENMAX 255
+#define    MC_CMD_PUT_FPGAREG_IN_LENMAX 252
 #define    MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
 #define       MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
 #define       MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
 #define       MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
 #define       MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define       MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251
+#define       MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
 
 /* MC_CMD_PUT_FPGAREG_OUT msgresponse */
 #define    MC_CMD_PUT_FPGAREG_OUT_LEN 0
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
-#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 255
+#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
-#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
 
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
-#define    MC_CMD_PUTS_IN_LENMAX 255
+#define    MC_CMD_PUTS_IN_LENMAX 252
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define       MC_CMD_PUTS_IN_STRING_OFST 12
 #define       MC_CMD_PUTS_IN_STRING_LEN 1
 #define       MC_CMD_PUTS_IN_STRING_MINNUM 1
-#define       MC_CMD_PUTS_IN_STRING_MAXNUM 243
+#define       MC_CMD_PUTS_IN_STRING_MAXNUM 240
 
 /* MC_CMD_PUTS_OUT msgresponse */
 #define    MC_CMD_PUTS_OUT_LEN 0
 
 /* MC_CMD_NVRAM_READ_OUT msgresponse */
 #define    MC_CMD_NVRAM_READ_OUT_LENMIN 1
-#define    MC_CMD_NVRAM_READ_OUT_LENMAX 255
+#define    MC_CMD_NVRAM_READ_OUT_LENMAX 252
 #define    MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
-#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
 
 
 /***********************************/
 
 /* MC_CMD_NVRAM_WRITE_IN msgrequest */
 #define    MC_CMD_NVRAM_WRITE_IN_LENMIN 13
-#define    MC_CMD_NVRAM_WRITE_IN_LENMAX 255
+#define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
 /*            Enum values, see field(s): */
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
-#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
 
 /* MC_CMD_NVRAM_WRITE_OUT msgresponse */
 #define    MC_CMD_NVRAM_WRITE_OUT_LEN 0
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
-#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
-#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
 
 
 /***********************************/
index 758148379b0e19d34e0e413884649060efa9090f..08f825b71ac8c5a69db6995fda028626e702adae 100644 (file)
@@ -585,6 +585,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
        [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]   = { 1, "sfc_exp_rom_cfg" },
        [MC_CMD_NVRAM_TYPE_PHY_PORT0]           = { 0, "sfc_phy_fw" },
        [MC_CMD_NVRAM_TYPE_PHY_PORT1]           = { 1, "sfc_phy_fw" },
+       [MC_CMD_NVRAM_TYPE_FPGA]                = { 0, "sfc_fpga" },
 };
 
 static int siena_mtd_probe_partition(struct efx_nic *efx,
@@ -598,7 +599,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
        bool protected;
        int rc;
 
-       if (type >= ARRAY_SIZE(siena_nvram_types))
+       if (type >= ARRAY_SIZE(siena_nvram_types) ||
+           siena_nvram_types[type].name == NULL)
                return -ENODEV;
 
        info = &siena_nvram_types[type];
@@ -627,7 +629,8 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
                                     struct efx_mtd *efx_mtd)
 {
        struct efx_mtd_partition *part;
-       uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
+       uint16_t fw_subtype_list[
+               MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
        int rc;
 
        rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
index cd9c0a989692b5a547c74136c9be28e208f6951c..c1a010cda89b92f20afc5657354a42bc592f8568 100644 (file)
@@ -37,7 +37,7 @@
  *
  **************************************************************************/
 
-#define EFX_DRIVER_VERSION     "3.1"
+#define EFX_DRIVER_VERSION     "3.2"
 
 #ifdef DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -56,7 +56,8 @@
 #define EFX_MAX_CHANNELS 32U
 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
 #define EFX_EXTRA_CHANNEL_IOV  0
-#define EFX_MAX_EXTRA_CHANNELS 1U
+#define EFX_EXTRA_CHANNEL_PTP  1
+#define EFX_MAX_EXTRA_CHANNELS 2U
 
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
@@ -68,6 +69,9 @@
 #define EFX_TXQ_TYPES          4
 #define EFX_MAX_TX_QUEUES      (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+
 struct efx_self_tests;
 
 /**
@@ -91,29 +95,31 @@ struct efx_special_buffer {
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *     Set only on the final fragment of a packet; %NULL for all other
- *     fragments.  When this fragment completes, then we can free this
- *     skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *     buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *     freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ *     freed when descriptor completes.
  * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
  * @len: Length of this fragment.
  *     This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
-       const struct sk_buff *skb;
-       struct efx_tso_header *tsoh;
+       union {
+               const struct sk_buff *skb;
+               void *heap_buf;
+       };
        dma_addr_t dma_addr;
+       unsigned short flags;
        unsigned short len;
-       bool continuation;
-       bool unmap_single;
        unsigned short unmap_len;
 };
+#define EFX_TX_BUF_CONT                1       /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB         2       /* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP                4       /* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE  8       /* buffer was mapped with dma_map_single() */
 
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +139,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
  * @initialised: Has hardware queue been initialised?
@@ -156,9 +163,6 @@ struct efx_tx_buffer {
  *     variable indicates that the queue is full.  This is to
  *     avoid cache-line ping-pong between the xmit path and the
  *     completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- *     that are not in use, and so available for new TSO sends. The list
- *     is protected by the TX queue lock.
  * @tso_bursts: Number of times TSO xmit invoked by kernel
  * @tso_long_headers: Number of packets with headers too long for standard
  *     blocks
@@ -175,6 +179,7 @@ struct efx_tx_queue {
        struct efx_channel *channel;
        struct netdev_queue *core_txq;
        struct efx_tx_buffer *buffer;
+       struct efx_buffer *tsoh_page;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
        bool initialised;
@@ -187,7 +192,6 @@ struct efx_tx_queue {
        unsigned int insert_count ____cacheline_aligned_in_smp;
        unsigned int write_count;
        unsigned int old_read_count;
-       struct efx_tso_header *tso_headers_free;
        unsigned int tso_bursts;
        unsigned int tso_long_headers;
        unsigned int tso_packets;
@@ -242,6 +246,8 @@ struct efx_rx_page_state {
 /**
  * struct efx_rx_queue - An Efx RX queue
  * @efx: The associated Efx NIC
+ * @core_index:  Index of network core RX queue.  Will be >= 0 iff this
+ *     is associated with a real RX queue.
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -263,6 +269,7 @@ struct efx_rx_page_state {
  */
 struct efx_rx_queue {
        struct efx_nic *efx;
+       int core_index;
        struct efx_rx_buffer *buffer;
        struct efx_special_buffer rxd;
        unsigned int ptr_mask;
@@ -390,14 +397,17 @@ struct efx_channel {
  * @get_name: Generate the channel's name (used for its IRQ handler)
  * @copy: Copy the channel state prior to reallocation.  May be %NULL if
  *     reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
  * @keep_eventq: Flag for whether event queue should be kept initialised
  *     while the device is stopped
  */
 struct efx_channel_type {
        void (*handle_no_channel)(struct efx_nic *);
        int (*pre_probe)(struct efx_channel *);
+       void (*post_remove)(struct efx_channel *);
        void (*get_name)(struct efx_channel *, char *buf, size_t len);
        struct efx_channel *(*copy)(const struct efx_channel *);
+       void (*receive_skb)(struct efx_channel *, struct sk_buff *);
        bool keep_eventq;
 };
 
@@ -430,11 +440,9 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-       STATE_INIT = 0,
-       STATE_RUNNING = 1,
-       STATE_FINI = 2,
-       STATE_DISABLED = 3,
-       STATE_MAX,
+       STATE_UNINIT = 0,       /* device being probed/removed or is frozen */
+       STATE_READY = 1,        /* hardware ready and netdev registered */
+       STATE_DISABLED = 2,     /* device disabled due to hardware errors */
 };
 
 /*
@@ -654,7 +662,7 @@ struct vfdi_status;
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
  * @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
  * @reset_pending: Bitmask for pending resets
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
@@ -664,6 +672,8 @@ struct vfdi_status;
  *     should be allocated for this NIC
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
  * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
  * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
  * @sram_lim_qw: Qword address limit of SRAM
@@ -730,6 +740,7 @@ struct vfdi_status;
  *     %local_addr_list. Protected by %local_lock.
  * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
  * @peer_work: Work item to broadcast peer addresses to VMs.
+ * @ptp_data: PTP state data
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
  * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
@@ -774,6 +785,9 @@ struct efx_nic {
 
        unsigned rxq_entries;
        unsigned txq_entries;
+       unsigned int txq_stop_thresh;
+       unsigned int txq_wake_thresh;
+
        unsigned tx_dc_base;
        unsigned rx_dc_base;
        unsigned sram_lim_qw;
@@ -854,6 +868,10 @@ struct efx_nic {
        struct work_struct peer_work;
 #endif
 
+#ifdef CONFIG_SFC_PTP
+       struct efx_ptp_data *ptp_data;
+#endif
+
        /* The following fields may be written more often */
 
        struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -1044,7 +1062,7 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
 
 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
 {
-       return channel->channel < channel->efx->n_rx_channels;
+       return channel->rx_queue.core_index >= 0;
 }
 
 static inline struct efx_rx_queue *
@@ -1116,5 +1134,13 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
 #define EFX_MAX_FRAME_LEN(mtu) \
        ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
 
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
 
 #endif /* EFX_NET_DRIVER_H */
index 326d799762d644b1c665a18c5b2e3bdca9a22206..cdff40b65729ad79a8a744933ca9453958305162 100644 (file)
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 /**************************************************************************
  *
  * Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
  *
  **************************************************************************/
 
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
                ++tx_queue->write_count;
 
                /* Create TX descriptor ring entry */
+               BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
                EFX_POPULATE_QWORD_4(*txd,
-                                    FSF_AZ_TX_KER_CONT, buffer->continuation,
+                                    FSF_AZ_TX_KER_CONT,
+                                    buffer->flags & EFX_TX_BUF_CONT,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
index bab5cd9f5740bb8e9e7476849f089ba513346b9f..438cef11f7270bd620ed04c42fb1c8ee8a9d3621 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef EFX_NIC_H
 #define EFX_NIC_H
 
+#include <linux/net_tstamp.h>
 #include <linux/i2c-algo-bit.h>
 #include "net_driver.h"
 #include "efx.h"
@@ -250,6 +251,41 @@ extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
 extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
                                     bool spoofchk);
 
+struct ethtool_ts_info;
+#ifdef CONFIG_SFC_PTP
+extern void efx_ptp_probe(struct efx_nic *efx);
+extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+extern int efx_ptp_get_ts_info(struct net_device *net_dev,
+                              struct ethtool_ts_info *ts_info);
+extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+#else
+static inline void efx_ptp_probe(struct efx_nic *efx) {}
+static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+{
+       return -EOPNOTSUPP;
+}
+static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
+                                     struct ethtool_ts_info *ts_info)
+{
+       ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
+                                   SOF_TIMESTAMPING_RX_SOFTWARE);
+       ts_info->phc_index = -1;
+
+       return 0;
+}
+static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       return false;
+}
+static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       return NETDEV_TX_OK;
+}
+static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
+#endif
+
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
 extern const struct efx_nic_type siena_a0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
new file mode 100644 (file)
index 0000000..5b3dd02
--- /dev/null
@@ -0,0 +1,1484 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities.  Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would
+ * block NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ *     The event contains the packet's UUID and sequence number, together
+ *     with the hardware timestamp.  The PTP receive packet queue is searched
+ *     for this UUID/sequence number and, if found, put on a pending queue.
+ *     Packets not matching are delivered without timestamps (MCDI events will
+ *     always arrive after the actual packet).
+ *     It is important for the operation of the PTP protocol that the ordering
+ *     of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ *     If work waiting, synchronise host/hardware time
+ *
+ *     Transmit: send packet through MC, which returns the transmission time
+ *     that is converted to an appropriate timestamp.
+ *
+ *     Receive: the packet's reception time is converted to an appropriate
+ *     timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "regs.h"
+#include "nic.h"
+
+/* Maximum number of events expected to make up a PTP event */
+#define        MAX_EVENT_FRAGS                 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define        MAX_SYNCHRONISE_WAIT_MS         2
+
+/* How long, at most, to spend synchronising */
+#define        SYNCHRONISE_PERIOD_NS           250000
+
+/* How often to update the shared memory time */
+#define        SYNCHRONISATION_GRANULARITY_NS  200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define        MIN_SYNCHRONISATION_NS          120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define        MAX_SYNCHRONISATION_NS          1000
+
+/* How many (MC) receive events that can be queued */
+#define        MAX_RECEIVE_EVENTS              8
+
+/* Length of (modified) moving average. */
+#define        AVERAGE_LENGTH                  16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS          10
+
+/* Offsets into PTP packet for identification.  These offsets are from the
+ * start of the IP header, not the MAC header.  Note that neither PTP V1 nor
+ * PTP V2 permit the use of IPV4 options.
+ */
+#define PTP_DPORT_OFFSET       22
+
+#define PTP_V1_VERSION_LENGTH  2
+#define PTP_V1_VERSION_OFFSET  28
+
+#define PTP_V1_UUID_LENGTH     6
+#define PTP_V1_UUID_OFFSET     50
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define        PTP_V1_MIN_LENGTH       64
+
+#define PTP_V2_VERSION_LENGTH  1
+#define PTP_V2_VERSION_OFFSET  29
+
+/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard.  The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH  6
+#define PTP_V2_MC_UUID_OFFSET  50
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define        PTP_V2_MIN_LENGTH       63
+
+#define        PTP_MIN_LENGTH          63
+
+#define PTP_ADDRESS            0xe0000181      /* 224.0.1.129 */
+#define PTP_EVENT_PORT         319
+#define PTP_GENERAL_PORT       320
+
+/* Annoyingly the formats of the version numbers are different between
+ * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
+ */
+#define        PTP_VERSION_V1          1
+
+#define        PTP_VERSION_V2          2
+#define        PTP_VERSION_V2_MASK     0x0f
+
+enum ptp_packet_state {
+       PTP_PACKET_STATE_UNMATCHED = 0,
+       PTP_PACKET_STATE_MATCHED,
+       PTP_PACKET_STATE_TIMED_OUT,
+       PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define        MC_NANOSECOND_BITS      30
+#define        MC_NANOSECOND_MASK      ((1 << MC_NANOSECOND_BITS) - 1)
+#define        MC_SECOND_MASK          ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB                        1000000
+
+/* Number of bits required to hold the above */
+#define        MAX_PPB_BITS            20
+
+/* Number of extra bits allowed when calculating fractional ns.
+ * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
+ * be less than 63.
+ */
+#define        PPB_EXTRA_BITS          2
+
+/* Precalculate scale word to avoid long long division at runtime */
+#define        PPB_SCALE_WORD  ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
+                       MAX_PPB_BITS)) / 1000000000LL)
+
+#define PTP_SYNC_ATTEMPTS      4
+
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @words: UUID and (partial) sequence number
+ * @expiry: Time after which the packet should be delivered irrespective of
+ *            event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ *         whether that is of no interest.
+ */
+struct efx_ptp_match {
+       u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
+       unsigned long expiry;
+       enum ptp_packet_state state;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ */
+struct efx_ptp_event_rx {
+       struct list_head link;
+       u32 seq0;
+       u32 seq1;
+       ktime_t hwtimestamp;
+       unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @seconds: Hardware timestamp, seconds
+ * @nanoseconds: Hardware timestamp, nanoseconds
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @waitns: Number of nanoseconds between hardware timestamp being read and
+ *          host end time being seen
+ * @window: Difference of host_end and host_start
+ * @valid: Whether this timeset is valid
+ */
+struct efx_ptp_timeset {
+       u32 host_start;
+       u32 seconds;
+       u32 nanoseconds;
+       u32 host_end;
+       u32 waitns;
+       u32 window;     /* Derived: end - start, allowing for wrap */
+};
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @channel: The PTP channel
+ * @rxq: Receive queue (awaiting timestamps)
+ * @txq: Transmit queue
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ *                  reset (disable, enable).
+ * @rxfilter_event: Receive filter when operating
+ * @rxfilter_general: Receive filter when operating
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled
+ * @mode: Mode in which PTP operating (PTP version)
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @last_sync_ns: Last number of nanoseconds between readings when synchronising
+ * @base_sync_ns: Number of nanoseconds for last synchronisation.
+ * @base_sync_valid: Whether base_sync_time is valid.
+ * @current_adjfreq: Current ppb adjustment.
+ * @phc_clock: Pointer to registered phc device
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ *         allocations in main data path).
+ * @debug_ptp_dir: PTP debugfs directory
+ * @missed_rx_sync: Number of packets received without synchronisation.
+ * @good_syncs: Number of successful synchronisations.
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @bad_syncs: Number of failed synchronisations.
+ * @last_sync_time: Number of nanoseconds for last synchronisation.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @min_sync_delta: Minimum time between event and synchronisation
+ * @max_sync_delta: Maximum time between event and synchronisation
+ * @average_sync_delta: Average time between event and synchronisation.
+ *                      Modified moving average.
+ * @last_sync_delta: Last time between event and synchronisation
+ * @mc_stats: Context value for MC statistics
+ * @timeset: Last set of synchronisation statistics.
+ */
+struct efx_ptp_data {
+       struct efx_channel *channel;
+       struct sk_buff_head rxq;
+       struct sk_buff_head txq;
+       struct list_head evt_list;
+       struct list_head evt_free_list;
+       spinlock_t evt_lock;
+       struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+       struct workqueue_struct *workwq;
+       struct work_struct work;
+       bool reset_required;
+       u32 rxfilter_event;
+       u32 rxfilter_general;
+       bool rxfilter_installed;
+       struct hwtstamp_config config;
+       bool enabled;
+       unsigned int mode;
+       efx_qword_t evt_frags[MAX_EVENT_FRAGS];
+       int evt_frag_idx;
+       int evt_code;
+       struct efx_buffer start;
+       struct pps_event_time host_time_pps;
+       unsigned last_sync_ns;
+       unsigned base_sync_ns;
+       bool base_sync_valid;
+       s64 current_adjfreq;
+       struct ptp_clock *phc_clock;
+       struct ptp_clock_info phc_clock_info;
+       struct work_struct pps_work;
+       struct workqueue_struct *pps_workwq;
+       bool nic_ts_enabled;
+       u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
+                              MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+       struct efx_ptp_timeset
+       timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+};
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+                          const struct timespec *e_ts);
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *request, int on);
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+       u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+                      efx->ptp_data->channel->channel);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+       return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+/* Disable MCDI PTP support.
+ *
+ * Note that this function should never rely on the presence of ptp_data -
+ * may be called before that exists.
+ */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+       u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+       return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(q))) {
+               local_bh_disable();
+               netif_receive_skb(skb);
+               local_bh_enable();
+       }
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+       netif_err(efx, drv, efx->net_dev,
+                 "ERROR: PTP requires MSI-X and 1 additional interrupt"
+                 "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+                              struct pps_event_time *last_time)
+{
+       struct pps_event_time now;
+       struct timespec limit;
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct timespec start;
+       int *mc_running = ptp->start.addr;
+
+       pps_get_ts(&now);
+       start = now.ts_real;
+       limit = now.ts_real;
+       timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+       /* Write host time for specified period or until MC is done */
+       while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+              ACCESS_ONCE(*mc_running)) {
+               struct timespec update_time;
+               unsigned int host_time;
+
+               /* Don't update continuously to avoid saturating the PCIe bus */
+               update_time = now.ts_real;
+               timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+               do {
+                       pps_get_ts(&now);
+               } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+                        ACCESS_ONCE(*mc_running));
+
+               /* Synchronise NIC with single word of time only */
+               host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+                            now.ts_real.tv_nsec);
+               /* Update host time in NIC memory */
+               _efx_writed(efx, cpu_to_le32(host_time),
+                           FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+       }
+       *last_time = now;
+}
+
+/* Read a timeset from the MC's results and partial process. */
+static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+{
+       unsigned start_ns, end_ns;
+
+       timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+       timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
+       timeset->nanoseconds = MCDI_DWORD(data,
+                                        PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+       timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+       timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+       /* Ignore seconds */
+       start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+       end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+       /* Allow for rollover */
+       if (end_ns < start_ns)
+               end_ns += NSEC_PER_SEC;
+       /* Determine duration of operation */
+       timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen.  The minimum value represents the "best" possible time and events
+ * too much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
+                                size_t response_length,
+                                const struct pps_event_time *last_time)
+{
+       unsigned number_readings = (response_length /
+                              MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+       unsigned i;
+       unsigned min;
+       unsigned min_set = 0;
+       unsigned total;
+       unsigned ngood = 0;
+       unsigned last_good = 0;
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       bool min_valid = false;
+       u32 last_sec;
+       u32 start_sec;
+       struct timespec delta;
+
+       if (number_readings == 0)
+               return -EAGAIN;
+
+       /* Find minimum value in this set of results, discarding clearly
+        * erroneous results.
+        */
+       for (i = 0; i < number_readings; i++) {
+               efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
+               synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+               if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
+                       if (min_valid) {
+                               if (ptp->timeset[i].window < min_set)
+                                       min_set = ptp->timeset[i].window;
+                       } else {
+                               min_valid = true;
+                               min_set = ptp->timeset[i].window;
+                       }
+               }
+       }
+
+       if (min_valid) {
+               if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
+                       min = ptp->base_sync_ns;
+               else
+                       min = min_set;
+       } else {
+               min = SYNCHRONISATION_GRANULARITY_NS;
+       }
+
+       /* Discard excessively long synchronise durations.  The MC times
+        * when it finishes reading the host time so the corrected window
+        * time should be fairly constant for a given platform.
+        */
+       total = 0;
+       for (i = 0; i < number_readings; i++)
+               if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
+                       unsigned win;
+
+                       win = ptp->timeset[i].window - ptp->timeset[i].waitns;
+                       if (win >= MIN_SYNCHRONISATION_NS &&
+                           win < MAX_SYNCHRONISATION_NS) {
+                               total += ptp->timeset[i].window;
+                               ngood++;
+                               last_good = i;
+                       }
+               }
+
+       if (ngood == 0) {
+               netif_warn(efx, drv, efx->net_dev,
+                          "PTP no suitable synchronisations %dns %dns\n",
+                          ptp->base_sync_ns, min_set);
+               return -EAGAIN;
+       }
+
+       /* Average minimum this synchronisation */
+       ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
+       if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
+               ptp->base_sync_valid = true;
+               ptp->base_sync_ns = ptp->last_sync_ns;
+       }
+
+       /* Calculate delay from actual PPS to last_time */
+       delta.tv_nsec =
+               ptp->timeset[last_good].nanoseconds +
+               last_time->ts_real.tv_nsec -
+               (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+       /* It is possible that the seconds rolled over between taking
+        * the start reading and the last value written by the host.  The
+        * timescales are such that a gap of more than one second is never
+        * expected.
+        */
+       start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+       last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+       if (start_sec != last_sec) {
+               if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+                       netif_warn(efx, hw, efx->net_dev,
+                                  "PTP bad synchronisation seconds\n");
+                       return -EAGAIN;
+               } else {
+                       delta.tv_sec = 1;
+               }
+       } else {
+               delta.tv_sec = 0;
+       }
+
+       ptp->host_time_pps = *last_time;
+       pps_sub_ts(&ptp->host_time_pps, delta);
+
+       return 0;
+}
+
+/* Synchronize times between the host and the MC
+ *
+ * Issues an MC_CMD_PTP_OP_SYNCHRONIZE MCDI request asking the MC to take
+ * num_readings paired host/NIC time samples.  The MC signals that it has
+ * started by writing a non-zero value into the shared DMA word at
+ * ptp->start; the host then feeds it host timestamps via
+ * efx_ptp_send_times() and finally processes the returned sample set.
+ * Returns 0 on success or a negative error code (-EAGAIN if no usable
+ * synchronisation was achieved).
+ */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+       size_t response_length;
+       int rc;
+       unsigned long timeout;
+       struct pps_event_time last_time = {};
+       unsigned int loops = 0;
+       int *start = ptp->start.addr;
+
+       MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+       MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+                      num_readings);
+       MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
+                      (u32)ptp->start.dma_addr);
+       MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
+                      (u32)((u64)ptp->start.dma_addr >> 32));
+
+       /* Clear flag that signals MC ready */
+       ACCESS_ONCE(*start) = 0;
+       efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+                          MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+
+       /* Wait for start from MCDI (or timeout) */
+       timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+       while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+               udelay(20);     /* Usually start MCDI execution quickly */
+               loops++;
+       }
+
+       /* Only send host times if the MC actually signalled readiness;
+        * on timeout we fall through and let the RPC completion report
+        * the failure.
+        */
+       if (ACCESS_ONCE(*start))
+               efx_ptp_send_times(efx, &last_time);
+
+       /* Collect results */
+       rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
+                                MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+                                synch_buf, sizeof(synch_buf),
+                                &response_length);
+       if (rc == 0)
+               rc = efx_ptp_process_times(efx, synch_buf, response_length,
+                                          &last_time);
+
+       return rc;
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire.
+ *
+ * The packet is linearized and software-checksummed if necessary, copied
+ * into the MCDI request buffer and handed to the MC, which timestamps it
+ * on transmission.  The returned hardware timestamp is reported back via
+ * skb_tstamp_tx().  The skb is always consumed.  Returns 0 on success or
+ * a negative error code.
+ */
+static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+{
+       u8 *txbuf = efx->ptp_data->txbuf;
+       struct skb_shared_hwtstamps timestamps;
+       int rc = -EIO;
+       /* MCDI driver requires word aligned lengths */
+       size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
+       u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+
+       MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+       MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+       if (skb_shinfo(skb)->nr_frags != 0) {
+               rc = skb_linearize(skb);
+               if (rc != 0)
+                       goto fail;
+       }
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               rc = skb_checksum_help(skb);
+               if (rc != 0)
+                       goto fail;
+       }
+       /* Copy only the packet itself: 'len' is the aligned length of the
+        * whole MCDI request (header included) and exceeds skb->len, so
+        * using it here would read past the end of the skb's linear data.
+        */
+       skb_copy_from_linear_data(skb,
+                                 &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
+                                 skb->len);
+       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
+                         sizeof(txtime), &len);
+       if (rc != 0)
+               goto fail;
+
+       memset(&timestamps, 0, sizeof(timestamps));
+       timestamps.hwtstamp = ktime_set(
+               MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
+               MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+
+       skb_tstamp_tx(skb, &timestamps);
+
+       rc = 0;
+
+fail:
+       dev_kfree_skb(skb);
+
+       return rc;
+}
+
+/* Purge queued receive events whose matching packet has not arrived
+ * before their expiry time, returning them to the free list.
+ */
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct list_head *cursor;
+       struct list_head *next;
+
+       /* Drop time-expired events.  list_for_each_safe() on an empty
+        * list is a no-op, so no separate list_empty() check is needed.
+        */
+       spin_lock_bh(&ptp->evt_lock);
+       list_for_each_safe(cursor, next, &ptp->evt_list) {
+               struct efx_ptp_event_rx *evt;
+
+               evt = list_entry(cursor, struct efx_ptp_event_rx,
+                                link);
+               if (time_after(jiffies, evt->expiry)) {
+                       /* list_move() == list_del() + list_add() */
+                       list_move(&evt->link, &ptp->evt_free_list);
+                       netif_warn(efx, hw, efx->net_dev,
+                                  "PTP rx event dropped\n");
+               }
+       }
+       spin_unlock_bh(&ptp->evt_lock);
+}
+
+/* Try to pair a received PTP packet with a queued timestamp event.
+ *
+ * On a match the event's hardware timestamp is copied into the skb and
+ * the event is returned to the free list.  Returns
+ * PTP_PACKET_STATE_MATCHED or PTP_PACKET_STATE_UNMATCHED.
+ */
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+                                             struct sk_buff *skb)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       bool evts_waiting;
+       struct list_head *cursor;
+       struct list_head *next;
+       struct efx_ptp_match *match;
+       enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+       /* Cheap early-out check; the list may still change before the
+        * second lock below, which is harmless - we just rescan it.
+        */
+       spin_lock_bh(&ptp->evt_lock);
+       evts_waiting = !list_empty(&ptp->evt_list);
+       spin_unlock_bh(&ptp->evt_lock);
+
+       if (!evts_waiting)
+               return PTP_PACKET_STATE_UNMATCHED;
+
+       match = (struct efx_ptp_match *)skb->cb;
+       /* Look for a matching timestamp in the event queue */
+       spin_lock_bh(&ptp->evt_lock);
+       list_for_each_safe(cursor, next, &ptp->evt_list) {
+               struct efx_ptp_event_rx *evt;
+
+               evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+               if ((evt->seq0 == match->words[0]) &&
+                   (evt->seq1 == match->words[1])) {
+                       struct skb_shared_hwtstamps *timestamps;
+
+                       /* Match - add in hardware timestamp */
+                       timestamps = skb_hwtstamps(skb);
+                       timestamps->hwtstamp = evt->hwtimestamp;
+
+                       match->state = PTP_PACKET_STATE_MATCHED;
+                       rc = PTP_PACKET_STATE_MATCHED;
+                       list_del(&evt->link);
+                       list_add(&evt->link, &ptp->evt_free_list);
+                       break;
+               }
+       }
+       spin_unlock_bh(&ptp->evt_lock);
+
+       return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ * true is returned if at least one of those packets requires
+ * synchronisation.
+ */
+static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       bool rc = false;
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(&ptp->rxq))) {
+               struct efx_ptp_match *match;
+
+               match = (struct efx_ptp_match *)skb->cb;
+               if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
+                       /* Packet needs no timestamp - deliver as-is */
+                       __skb_queue_tail(q, skb);
+               } else if (efx_ptp_match_rx(efx, skb) ==
+                          PTP_PACKET_STATE_MATCHED) {
+                       rc = true;
+                       __skb_queue_tail(q, skb);
+               } else if (time_after(jiffies, match->expiry)) {
+                       /* Gave up waiting for the timestamp event;
+                        * deliver the packet anyway, unstamped.
+                        */
+                       match->state = PTP_PACKET_STATE_TIMED_OUT;
+                       netif_warn(efx, rx_err, efx->net_dev,
+                                  "PTP packet - no timestamp seen\n");
+                       __skb_queue_tail(q, skb);
+               } else {
+                       /* Replace unprocessed entry and stop */
+                       skb_queue_head(&ptp->rxq, skb);
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/* Complete processing of a received packet.  Runs from a workqueue, so
+ * bottom halves must be disabled around netif_receive_skb().
+ */
+static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       local_bh_disable();
+       netif_receive_skb(skb);
+       local_bh_enable();
+}
+
+/* Start PTP processing: install RX filters for the PTP event and general
+ * UDP ports, then enable PTP on the MC.  On failure both filters are
+ * removed (efx_filter_remove_id_safe() tolerates a filter that was never
+ * inserted).  Returns 0 or a negative error code.
+ */
+static int efx_ptp_start(struct efx_nic *efx)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct efx_filter_spec rxfilter;
+       int rc;
+
+       ptp->reset_required = false;
+
+       /* Must filter on both event and general ports to ensure
+        * that there is no packet re-ordering.
+        */
+       efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+                          efx_rx_queue_index(
+                                  efx_channel_get_rx_queue(ptp->channel)));
+       rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+                                      htonl(PTP_ADDRESS),
+                                      htons(PTP_EVENT_PORT));
+       if (rc != 0)
+               return rc;
+
+       /* efx_filter_insert_filter() returns the filter ID on success */
+       rc = efx_filter_insert_filter(efx, &rxfilter, true);
+       if (rc < 0)
+               return rc;
+       ptp->rxfilter_event = rc;
+
+       efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+                          efx_rx_queue_index(
+                                  efx_channel_get_rx_queue(ptp->channel)));
+       rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+                                      htonl(PTP_ADDRESS),
+                                      htons(PTP_GENERAL_PORT));
+       if (rc != 0)
+               goto fail;
+
+       rc = efx_filter_insert_filter(efx, &rxfilter, true);
+       if (rc < 0)
+               goto fail;
+       ptp->rxfilter_general = rc;
+
+       rc = efx_ptp_enable(efx);
+       if (rc != 0)
+               goto fail2;
+
+       ptp->evt_frag_idx = 0;
+       ptp->current_adjfreq = 0;
+       ptp->rxfilter_installed = true;
+
+       return 0;
+
+fail2:
+       efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                 ptp->rxfilter_general);
+fail:
+       efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                 ptp->rxfilter_event);
+
+       return rc;
+}
+
+/* Stop PTP processing: disable it on the MC, remove the receive filters,
+ * flush queued packets and recycle any pending receive events.  Returns
+ * the result of the MCDI disable operation.
+ */
+static int efx_ptp_stop(struct efx_nic *efx)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       /* Disable on the MC first, before tearing down host state */
+       int rc = efx_ptp_disable(efx);
+       struct list_head *cursor;
+       struct list_head *next;
+
+       if (ptp->rxfilter_installed) {
+               efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                         ptp->rxfilter_general);
+               efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                         ptp->rxfilter_event);
+               ptp->rxfilter_installed = false;
+       }
+
+       /* Make sure RX packets are really delivered */
+       efx_ptp_deliver_rx_queue(&ptp->rxq);
+       skb_queue_purge(&ptp->txq);
+
+       /* Drop any pending receive events back onto the free list */
+       spin_lock_bh(&ptp->evt_lock);
+       list_for_each_safe(cursor, next, &ptp->evt_list)
+               list_move(cursor, &ptp->evt_free_list);
+       spin_unlock_bh(&ptp->evt_lock);
+
+       return rc;
+}
+
+/* PPS work item: resynchronise host and NIC clocks, then report the
+ * resulting PPS timestamp to the PTP clock subsystem.  If the
+ * synchronisation fails the event is silently dropped.
+ */
+static void efx_ptp_pps_worker(struct work_struct *work)
+{
+       struct efx_ptp_data *ptp =
+               container_of(work, struct efx_ptp_data, pps_work);
+       struct efx_nic *efx = ptp->channel->efx;
+       struct ptp_clock_event ptp_evt;
+
+       if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
+               return;
+
+       ptp_evt.type = PTP_CLOCK_PPSUSR;
+       ptp_evt.pps_times = ptp->host_time_pps;
+       ptp_clock_event(ptp->phc_clock, &ptp_evt);
+}
+
+/* Process any pending transmissions and timestamp any received packets.
+ *
+ * Runs from ptp->workwq; MCDI transmission is not allowed in the normal
+ * transmit path, so queued PTP packets are sent from here instead.
+ */
+static void efx_ptp_worker(struct work_struct *work)
+{
+       struct efx_ptp_data *ptp_data =
+               container_of(work, struct efx_ptp_data, work);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       struct sk_buff *skb;
+       struct sk_buff_head tempq;
+
+       if (ptp_data->reset_required) {
+               /* NOTE(review): the return value of efx_ptp_start() is
+                * ignored here - a failed restart goes unreported.
+                */
+               efx_ptp_stop(efx);
+               efx_ptp_start(efx);
+               return;
+       }
+
+       efx_ptp_drop_time_expired_events(efx);
+
+       __skb_queue_head_init(&tempq);
+       if (efx_ptp_process_events(efx, &tempq) ||
+           !skb_queue_empty(&ptp_data->txq)) {
+
+               while ((skb = skb_dequeue(&ptp_data->txq)))
+                       efx_ptp_xmit_skb(efx, skb);
+       }
+
+       /* Deliver packets released by efx_ptp_process_events() */
+       while ((skb = __skb_dequeue(&tempq)))
+               efx_ptp_process_rx(efx, skb);
+}
+
+/* Initialise PTP channel and state.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ *
+ * Allocates the per-NIC PTP state, the MC-start DMA buffer, the PTP and
+ * PPS workqueues, and registers a PHC clock.  Returns 0 or a negative
+ * error code, with all partially-acquired resources released on failure.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
+{
+       struct efx_nic *efx = channel->efx;
+       struct efx_ptp_data *ptp;
+       int rc = 0;
+       unsigned int pos;
+
+       channel->irq_moderation = 0;
+       channel->rx_queue.core_index = 0;
+
+       ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+       efx->ptp_data = ptp;
+       if (!efx->ptp_data)
+               return -ENOMEM;
+
+       /* DMA buffer through which the MC signals that it has started */
+       rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+       if (rc != 0)
+               goto fail1;
+
+       ptp->channel = channel;
+       skb_queue_head_init(&ptp->rxq);
+       skb_queue_head_init(&ptp->txq);
+       ptp->workwq = create_singlethread_workqueue("sfc_ptp");
+       if (!ptp->workwq) {
+               rc = -ENOMEM;
+               goto fail2;
+       }
+
+       INIT_WORK(&ptp->work, efx_ptp_worker);
+       ptp->config.flags = 0;
+       ptp->config.tx_type = HWTSTAMP_TX_OFF;
+       ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+       INIT_LIST_HEAD(&ptp->evt_list);
+       INIT_LIST_HEAD(&ptp->evt_free_list);
+       spin_lock_init(&ptp->evt_lock);
+       for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+               list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+
+       /* Name the clock after the port's permanent MAC address
+        * ("%pm" prints it without separators, fitting the name field)
+        */
+       ptp->phc_clock_info.owner = THIS_MODULE;
+       snprintf(ptp->phc_clock_info.name,
+                sizeof(ptp->phc_clock_info.name),
+                "%pm", efx->net_dev->perm_addr);
+       ptp->phc_clock_info.max_adj = MAX_PPB;
+       ptp->phc_clock_info.n_alarm = 0;
+       ptp->phc_clock_info.n_ext_ts = 0;
+       ptp->phc_clock_info.n_per_out = 0;
+       ptp->phc_clock_info.pps = 1;
+       ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
+       ptp->phc_clock_info.adjtime = efx_phc_adjtime;
+       ptp->phc_clock_info.gettime = efx_phc_gettime;
+       ptp->phc_clock_info.settime = efx_phc_settime;
+       ptp->phc_clock_info.enable = efx_phc_enable;
+
+       ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+                                           &efx->pci_dev->dev);
+       /* ptp_clock_register() reports failure with ERR_PTR(), not NULL;
+        * propagate the encoded error rather than returning 0.
+        */
+       if (IS_ERR(ptp->phc_clock)) {
+               rc = PTR_ERR(ptp->phc_clock);
+               goto fail3;
+       }
+
+       INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+       ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+       if (!ptp->pps_workwq) {
+               rc = -ENOMEM;
+               goto fail4;
+       }
+       ptp->nic_ts_enabled = false;
+
+       return 0;
+fail4:
+       ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+fail3:
+       destroy_workqueue(efx->ptp_data->workwq);
+
+fail2:
+       efx_nic_free_buffer(efx, &ptp->start);
+
+fail1:
+       kfree(efx->ptp_data);
+       efx->ptp_data = NULL;
+
+       return rc;
+}
+
+/* Tear down PTP state for a channel: disable PTP on the MC, quiesce the
+ * work items, then release queues, clock, workqueues and memory.  Safe to
+ * call when PTP was never probed (ptp_data == NULL).
+ */
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+       struct efx_nic *efx = channel->efx;
+
+       if (!efx->ptp_data)
+               return;
+
+       (void)efx_ptp_disable(channel->efx);
+
+       /* Work items use ptp_data, so stop them before freeing anything */
+       cancel_work_sync(&efx->ptp_data->work);
+       cancel_work_sync(&efx->ptp_data->pps_work);
+
+       skb_queue_purge(&efx->ptp_data->rxq);
+       skb_queue_purge(&efx->ptp_data->txq);
+
+       ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+       destroy_workqueue(efx->ptp_data->workwq);
+       destroy_workqueue(efx->ptp_data->pps_workwq);
+
+       efx_nic_free_buffer(efx, &efx->ptp_data->start);
+       kfree(efx->ptp_data);
+}
+
+/* Generate the interrupt/channel name for the dedicated PTP channel */
+static void efx_ptp_get_channel_name(struct efx_channel *channel,
+                                    char *buf, size_t len)
+{
+       snprintf(buf, len, "%s-ptp", channel->efx->name);
+}
+
+/* Determine whether this packet should be processed by the PTP module
+ * or transmitted conventionally.
+ *
+ * Only IPv4/UDP packets to the PTP event port, within the size range the
+ * MC can transmit, are diverted - and only while PTP is enabled.
+ */
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       return efx->ptp_data &&
+               efx->ptp_data->enabled &&
+               skb->len >= PTP_MIN_LENGTH &&
+               skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+               likely(skb->protocol == htons(ETH_P_IP)) &&
+               ip_hdr(skb)->protocol == IPPROTO_UDP &&
+               udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+}
+
+/* Receive a PTP packet.  Packets are queued until the arrival of
+ * the receive timestamp from the MC - this will probably occur after the
+ * packet arrival because of the processing in the MC.
+ *
+ * Packets of the wrong PTP version or too short for the expected layout
+ * are delivered directly without timestamping.
+ */
+static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+{
+       struct efx_nic *efx = channel->efx;
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+       u8 *data;
+       unsigned int version;
+
+       match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+
+       /* Correct version? */
+       if (ptp->mode == MC_CMD_PTP_MODE_V1) {
+               if (skb->len < PTP_V1_MIN_LENGTH) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+               version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+               if (version != PTP_VERSION_V1) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+       } else {
+               if (skb->len < PTP_V2_MIN_LENGTH) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+               version = skb->data[PTP_V2_VERSION_OFFSET];
+
+               /* The V1 offsets are reused below for V2 packets; these
+                * build-time checks prove the layouts coincide.
+                */
+               BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
+               BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
+               BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
+               BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+               BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
+               if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+       }
+
+       /* Does this packet require timestamping? */
+       if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+               struct skb_shared_hwtstamps *timestamps;
+
+               match->state = PTP_PACKET_STATE_UNMATCHED;
+
+               /* Clear all timestamps held: filled in later */
+               timestamps = skb_hwtstamps(skb);
+               memset(timestamps, 0, sizeof(*timestamps));
+
+               /* Extract UUID/Sequence information; packed into two
+                * words matching the key the MC puts in its RX events.
+                */
+               data = skb->data + PTP_V1_UUID_OFFSET;
+               match->words[0] = (data[0]         |
+                                  (data[1] << 8)  |
+                                  (data[2] << 16) |
+                                  (data[3] << 24));
+               match->words[1] = (data[4]         |
+                                  (data[5] << 8)  |
+                                  (skb->data[PTP_V1_SEQUENCE_OFFSET +
+                                             PTP_V1_SEQUENCE_LENGTH - 1] <<
+                                   16));
+       } else {
+               match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
+       }
+
+       skb_queue_tail(&ptp->rxq, skb);
+       queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Transmit a PTP packet.  This has to be transmitted by the MC
+ * itself, through an MCDI call.  MCDI calls aren't permitted
+ * in the transmit path so defer the actual transmission to a suitable worker.
+ *
+ * Always consumes the skb and returns NETDEV_TX_OK.
+ */
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+
+       skb_queue_tail(&ptp->txq, skb);
+
+       /* Flag event packets so a TX hardware timestamp is expected */
+       if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+           (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+               efx_xmit_hwtstamp_pending(skb);
+       queue_work(ptp->workwq, &ptp->work);
+
+       return NETDEV_TX_OK;
+}
+
+/* Transition PTP to the requested enabled state and operating mode.
+ *
+ * A mode change while enabled requires a full stop/start cycle.  When
+ * enabling, a baseline synchronisation must succeed or PTP is stopped
+ * again and the error returned.  ptp_data->enabled is only updated once
+ * the transition has fully succeeded.
+ */
+static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+                              unsigned int new_mode)
+{
+       if ((enable_wanted != efx->ptp_data->enabled) ||
+           (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+               int rc;
+
+               if (enable_wanted) {
+                       /* Change of mode requires disable */
+                       if (efx->ptp_data->enabled &&
+                           (efx->ptp_data->mode != new_mode)) {
+                               efx->ptp_data->enabled = false;
+                               rc = efx_ptp_stop(efx);
+                               if (rc != 0)
+                                       return rc;
+                       }
+
+                       /* Set new operating mode and establish
+                        * baseline synchronisation, which must
+                        * succeed.
+                        */
+                       efx->ptp_data->mode = new_mode;
+                       rc = efx_ptp_start(efx);
+                       if (rc == 0) {
+                               rc = efx_ptp_synchronize(efx,
+                                                        PTP_SYNC_ATTEMPTS * 2);
+                               if (rc != 0)
+                                       efx_ptp_stop(efx);
+                       }
+               } else {
+                       rc = efx_ptp_stop(efx);
+               }
+
+               if (rc != 0)
+                       return rc;
+
+               efx->ptp_data->enabled = enable_wanted;
+       }
+
+       return 0;
+}
+
+/* Apply a hardware timestamping configuration (from SIOCSHWTSTAMP).
+ *
+ * Maps the requested rx_filter onto the MC's PTP V1/V2 modes, upgrading
+ * narrower filters to the corresponding L4 event filter, then transitions
+ * the hardware via efx_ptp_change_mode().  On success the (possibly
+ * adjusted) config is stored.  Returns 0, -EINVAL for unknown flags, or
+ * -ERANGE for unsupported tx_type/rx_filter values.
+ */
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+       bool enable_wanted = false;
+       unsigned int new_mode;
+       int rc;
+
+       if (init->flags)
+               return -EINVAL;
+
+       if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+           (init->tx_type != HWTSTAMP_TX_ON))
+               return -ERANGE;
+
+       new_mode = efx->ptp_data->mode;
+       /* Determine whether any PTP HW operations are required */
+       switch (init->rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+               new_mode = MC_CMD_PTP_MODE_V1;
+               enable_wanted = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       /* Although these three are accepted only IPV4 packets will be
+        * timestamped
+        */
+               init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+               new_mode = MC_CMD_PTP_MODE_V2;
+               enable_wanted = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+               /* Non-IP + IPv6 timestamping not supported */
+               return -ERANGE;
+       default:
+               return -ERANGE;
+       }
+
+       if (init->tx_type != HWTSTAMP_TX_OFF)
+               enable_wanted = true;
+
+       rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+       if (rc != 0)
+               return rc;
+
+       efx->ptp_data->config = *init;
+
+       return 0;
+}
+
+/* Report timestamping capabilities for the ethtool get_ts_info operation.
+ * Only IPv4 L4 PTP V1/V2 filters are advertised, matching what
+ * efx_ptp_ts_init() accepts.
+ */
+int
+efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_ptp_data *ptp = efx->ptp_data;
+
+       if (!ptp)
+               return -EOPNOTSUPP;
+
+       ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE |
+                                   SOF_TIMESTAMPING_RX_HARDWARE |
+                                   SOF_TIMESTAMPING_RAW_HARDWARE);
+       ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+       ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+       ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
+                              1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+                              1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+                              1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+                              1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+                              1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+                              1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+       return 0;
+}
+
+/* Handle the SIOCSHWTSTAMP ioctl: copy in the user's hwtstamp_config,
+ * apply it, and copy back the (possibly adjusted) config so the caller
+ * sees what was actually configured.
+ */
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+{
+       struct hwtstamp_config config;
+       int rc;
+
+       /* Not a PTP enabled port */
+       if (!efx->ptp_data)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       rc = efx_ptp_ts_init(efx, &config);
+       if (rc != 0)
+               return rc;
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+               ? -EFAULT : 0;
+}
+
+/* Report a malformed MC event (wrong number of fragments) and request a
+ * PTP restart, which efx_ptp_worker() performs via reset_required.
+ */
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+
+       netif_err(efx, hw, efx->net_dev,
+               "PTP unexpected event length: got %d expected %d\n",
+               ptp->evt_frag_idx, expected_frag_len);
+       ptp->reset_required = true;
+       queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event.  Put it on the event queue and
+ * start worker thread.  This is required because events and their
+ * corresponding packets may come in either order.
+ *
+ * Decodes the three event fragments into a sequence/UUID key (matching
+ * the key efx_ptp_rx() extracts from the packet) plus a hardware
+ * timestamp.  If no free event slot is available the event is dropped.
+ */
+static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+       struct efx_ptp_event_rx *evt = NULL;
+
+       if (ptp->evt_frag_idx != 3) {
+               ptp_event_failure(efx, 3);
+               return;
+       }
+
+       spin_lock_bh(&ptp->evt_lock);
+       if (!list_empty(&ptp->evt_free_list)) {
+               evt = list_first_entry(&ptp->evt_free_list,
+                                      struct efx_ptp_event_rx, link);
+               list_del(&evt->link);
+
+               evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
+               evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
+                                            MCDI_EVENT_SRC)        |
+                            (EFX_QWORD_FIELD(ptp->evt_frags[1],
+                                             MCDI_EVENT_SRC) << 8) |
+                            (EFX_QWORD_FIELD(ptp->evt_frags[0],
+                                             MCDI_EVENT_SRC) << 16));
+               evt->hwtimestamp = ktime_set(
+                       EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
+                       EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+               evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+               list_add_tail(&evt->link, &ptp->evt_list);
+
+               queue_work(ptp->workwq, &ptp->work);
+       } else {
+               netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+       }
+       spin_unlock_bh(&ptp->evt_lock);
+}
+
+/* Process a PTP fault event from the MC: a single-fragment event whose
+ * data field carries the MC's error code, which is just logged.
+ */
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+       int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+       if (ptp->evt_frag_idx != 1) {
+               ptp_event_failure(efx, 1);
+               return;
+       }
+
+       netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+/* Process a PPS event from the MC by scheduling the PPS worker,
+ * but only while NIC timestamping is enabled.
+ */
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+       if (ptp->nic_ts_enabled)
+               queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+/* Accumulate MCDI PTP event fragments and dispatch complete events.
+ *
+ * Multi-fragment events are collected in ptp->evt_frags; the CONT field
+ * marks all fragments except the last.  Out-of-sequence fragments (code
+ * mismatch) restart collection.
+ */
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+       if (!ptp->enabled)
+               return;
+
+       if (ptp->evt_frag_idx == 0) {
+               ptp->evt_code = code;
+       } else if (ptp->evt_code != code) {
+               /* NOTE(review): evt_code is not refreshed here, so the
+                * fragment stored below is keyed against the old code -
+                * confirm this restart path is intended.
+                */
+               netif_err(efx, hw, efx->net_dev,
+                         "PTP out of sequence event %d\n", code);
+               ptp->evt_frag_idx = 0;
+       }
+
+       ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
+       if (!MCDI_EVENT_FIELD(*ev, CONT)) {
+               /* Process resulting event */
+               switch (code) {
+               case MCDI_EVENT_CODE_PTP_RX:
+                       ptp_event_rx(efx, ptp);
+                       break;
+               case MCDI_EVENT_CODE_PTP_FAULT:
+                       ptp_event_fault(efx, ptp);
+                       break;
+               case MCDI_EVENT_CODE_PTP_PPS:
+                       ptp_event_pps(efx, ptp);
+                       break;
+               default:
+                       netif_err(efx, hw, efx->net_dev,
+                                 "PTP unknown event %d\n", code);
+                       break;
+               }
+               ptp->evt_frag_idx = 0;
+       } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
+               netif_err(efx, hw, efx->net_dev,
+                         "PTP too many event fragments\n");
+               ptp->evt_frag_idx = 0;
+       }
+}
+
+/* PHC adjfreq operation: clamp the requested frequency adjustment to
+ * +/-MAX_PPB, convert from parts-per-billion to the MC's fixed-point
+ * nanosecond representation, and send an MC_CMD_PTP_OP_ADJUST with a
+ * frequency delta only (no absolute time offset).
+ */
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+       s64 adjustment_ns;
+       int rc;
+
+       if (delta > MAX_PPB)
+               delta = MAX_PPB;
+       else if (delta < -MAX_PPB)
+               delta = -MAX_PPB;
+
+       /* Convert ppb to fixed point ns. */
+       adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
+                        (PPB_EXTRA_BITS + MAX_PPB_BITS));
+
+       MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
+                      (u32)(adjustment_ns >> 32));
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
+       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
+                         NULL, 0, NULL);
+       if (rc != 0)
+               return rc;
+
+       /* Remember the applied (clamped) adjustment */
+       ptp_data->current_adjfreq = delta;
+       return 0;
+}
+
+/* PHC adjtime callback: step the NIC clock by @delta nanoseconds.
+ * The offset is split into seconds/nanoseconds and sent via an
+ * MC_CMD_PTP ADJUST request with zero frequency adjustment.
+ * Returns 0 on success or a negative error code from the MCDI call.
+ */
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       struct timespec delta_ts = ns_to_timespec(delta);
+       u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+
+       /* Time-only adjustment: FREQ_LO/FREQ_HI stay zero */
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+       return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+/* PHC gettime callback: read the current NIC time into @ts via an
+ * MC_CMD_PTP READ_NIC_TIME request.  Returns 0 on success or a
+ * negative error code from the MCDI call; @ts is only written on
+ * success.
+ */
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
+       u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), NULL);
+       if (rc != 0)
+               return rc;
+
+       ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
+       ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+       return 0;
+}
+
+/* PHC settime callback: set the NIC clock to the absolute time
+ * @e_ts.  Implemented as read-modify: read the current NIC time,
+ * compute the offset to the desired time and apply it with
+ * efx_phc_adjtime().  Returns 0 on success or a negative error code
+ * from either MCDI operation.
+ */
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+                          const struct timespec *e_ts)
+{
+       /* Get the current NIC time, efx_phc_gettime.
+        * Subtract from the desired time to get the offset
+        * call efx_phc_adjtime with the offset
+        */
+       int rc;
+       struct timespec time_now;
+       struct timespec delta;
+
+       rc = efx_phc_gettime(ptp, &time_now);
+       if (rc != 0)
+               return rc;
+
+       delta = timespec_sub(*e_ts, time_now);
+
+       /* Fix: capture the return code.  Previously the result of
+        * efx_phc_adjtime() was discarded and the stale rc (0 from the
+        * successful efx_phc_gettime() above) was tested instead, so
+        * adjtime failures were silently ignored.
+        */
+       rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+       if (rc != 0)
+               return rc;
+
+       return 0;
+}
+
+/* PHC enable callback: only PTP_CLK_REQ_PPS requests are supported.
+ * Records whether NIC-generated PPS timestamp events should be
+ * delivered.  Returns 0 on success or -EOPNOTSUPP for any other
+ * request type.
+ */
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *request,
+                         int enable)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       if (request->type != PTP_CLK_REQ_PPS)
+               return -EOPNOTSUPP;
+
+       /* Normalise to bool: any non-zero enable turns PPS events on */
+       ptp_data->nic_ts_enabled = !!enable;
+       return 0;
+}
+
+/* Channel-type operations for the dedicated PTP channel.  PTP
+ * packets are delivered through receive_skb (efx_ptp_rx) rather than
+ * the normal GRO path.
+ */
+static const struct efx_channel_type efx_ptp_channel_type = {
+       .handle_no_channel      = efx_ptp_handle_no_channel,
+       .pre_probe              = efx_ptp_probe_channel,
+       .post_remove            = efx_ptp_remove_channel,
+       .get_name               = efx_ptp_get_channel_name,
+       /* no copy operation; there is no need to reallocate this channel */
+       .receive_skb            = efx_ptp_rx,
+       .keep_eventq            = false,
+};
+
+/* Probe for PTP support and, if present, register the PTP channel
+ * type as the extra channel for this NIC.
+ */
+void efx_ptp_probe(struct efx_nic *efx)
+{
+       /* Check whether PTP is implemented on this NIC.  The DISABLE
+        * operation will succeed if and only if it is implemented.
+        */
+       if (efx_ptp_disable(efx) == 0)
+               efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+                       &efx_ptp_channel_type;
+}
index 719319b89d7a8086f315559a1c68cfe28a82acd3..9e0ad1b75c335c0bf014fd1b47962c6719705767 100644 (file)
@@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                                  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 
-               skb_record_rx_queue(skb, channel->channel);
+               skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
                gro_result = napi_gro_frags(napi);
        } else {
@@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel,
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
 
+       /* Record the rx_queue */
+       skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
        /* Pass the packet up */
-       netif_receive_skb(skb);
+       if (channel->type->receive_skb)
+               channel->type->receive_skb(channel, skb);
+       else
+               netif_receive_skb(skb);
 
        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
@@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
                 * at the ethernet header */
                skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-               skb_record_rx_queue(skb, channel->channel);
+               skb_record_rx_queue(skb, channel->rx_queue.core_index);
        }
 
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-       if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+       if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
+           !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, eh);
        else
                efx_rx_deliver(channel, rx_buf);
@@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
 {
        enum efx_rx_alloc_method method = rx_alloc_method;
 
+       if (channel->type->receive_skb) {
+               channel->rx_alloc_push_pages = false;
+               return;
+       }
+
        /* Only makes sense to use page based allocation if GRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
index 96068d15b601f5afc285618ea6fecd86b7713caf..ce72ae4f399fdf3861262967aeb8222d5856bd85 100644 (file)
@@ -614,7 +614,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 {
        enum efx_loopback_mode mode;
        struct efx_loopback_state *state;
-       struct efx_channel *channel = efx_get_channel(efx, 0);
+       struct efx_channel *channel =
+               efx_get_channel(efx, efx->tx_channel_offset);
        struct efx_tx_queue *tx_queue;
        int rc = 0;
 
index 6bafd216e55e6eb668d12b5e636f14975ae7f110..84b41bf08a38a22ad8e9f0954ec32631628b4ad7 100644 (file)
@@ -335,6 +335,7 @@ static int siena_probe_nic(struct efx_nic *efx)
                goto fail5;
 
        efx_sriov_probe(efx);
+       efx_ptp_probe(efx);
 
        return 0;
 
index 9cb3b84ecae99a24553f0bd039d5a5fdbaafbd8e..d49b53dc2a500a2602093a2df63daba5c5ad2538 100644 (file)
@@ -21,6 +21,9 @@
 /* Number of longs required to track all the VIs in a VF */
 #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
 
+/* Maximum number of RX queues supported */
+#define VF_MAX_RX_QUEUES 63
+
 /**
  * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
  * @VF_TX_FILTER_OFF: Disabled
@@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
        efx_oword_t reg;
 
        if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
+           vf_rxq >= VF_MAX_RX_QUEUES ||
            bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
                if (net_ratelimit())
                        netif_err(efx, hw, efx->net_dev,
@@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
        __le32 *rxqs;
        int rc;
 
+       BUILD_BUG_ON(VF_MAX_RX_QUEUES >
+                    MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
        rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
        if (rxqs == NULL)
                return VFDI_RC_ENOMEM;
@@ -1028,6 +1035,7 @@ efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
 static const struct efx_channel_type efx_sriov_channel_type = {
        .handle_no_channel      = efx_sriov_handle_no_channel,
        .pre_probe              = efx_sriov_probe_channel,
+       .post_remove            = efx_channel_dummy_op_void,
        .get_name               = efx_sriov_get_channel_name,
        /* no copy operation; channel must not be reallocated */
        .keep_eventq            = true,
index 18713436b44345a110ed263d54c5411d1e93c6fa..5e090e54298e667a732c53ece789bb03e40aef5e 100644 (file)
 #include "nic.h"
 #include "workarounds.h"
 
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
-               if (buffer->unmap_single)
+               if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
-               buffer->unmap_single = false;
        }
 
-       if (buffer->skb) {
+       if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
-               buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
+       } else if (buffer->flags & EFX_TX_BUF_HEAP) {
+               kfree(buffer->heap_buf);
        }
-}
 
-/**
- * struct efx_tso_header - a DMA mapped buffer for packet headers
- * @next: Linked list of free ones.
- *     The list is protected by the TX queue lock.
- * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
- * @dma_addr: The DMA address of the header below.
- *
- * This controls the memory used for a TSO header.  Use TSOH_DATA()
- * to find the packet header data.  Use TSOH_SIZE() to calculate the
- * total size required for a given packet header length.  TSO headers
- * in the free list are exactly %TSOH_STD_SIZE bytes in size.
- */
-struct efx_tso_header {
-       union {
-               struct efx_tso_header *next;
-               size_t unmap_len;
-       };
-       dma_addr_t dma_addr;
-};
+       buffer->len = 0;
+       buffer->flags = 0;
+}
 
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
-static void efx_fini_tso(struct efx_tx_queue *tx_queue);
-static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
-                              struct efx_tso_header *tsoh);
-
-static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-                         struct efx_tx_buffer *buffer)
-{
-       if (buffer->tsoh) {
-               if (likely(!buffer->tsoh->unmap_len)) {
-                       buffer->tsoh->next = tx_queue->tso_headers_free;
-                       tx_queue->tso_headers_free = buffer->tsoh;
-               } else {
-                       efx_tsoh_heap_free(tx_queue, buffer->tsoh);
-               }
-               buffer->tsoh = NULL;
-       }
-}
-
 
 static inline unsigned
 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
        return max_descs;
 }
 
+/* Get partner of a TX queue, seen as part of the same net core queue.
+ * The partner differs only in the EFX_TXQ_TYPE_OFFLOAD bit of its
+ * queue number, so it is found at a fixed +/- offset in the queue
+ * array depending on whether this queue is the offload one.
+ */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+               return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+       else
+               return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+/* Stop the net core TX queue if either of the paired hardware queues
+ * is close to full.  Called after descriptors have been pushed, so a
+ * later completion is guaranteed to re-check and wake the queue.
+ */
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+       /* We need to consider both queues that the net core sees as one */
+       struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+       struct efx_nic *efx = txq1->efx;
+       unsigned int fill_level;
+
+       /* Cheap first check using the cached read counts */
+       fill_level = max(txq1->insert_count - txq1->old_read_count,
+                        txq2->insert_count - txq2->old_read_count);
+       if (likely(fill_level < efx->txq_stop_thresh))
+               return;
+
+       /* We used the stale old_read_count above, which gives us a
+        * pessimistic estimate of the fill level (which may even
+        * validly be >= efx->txq_entries).  Now try again using
+        * read_count (more likely to be a cache miss).
+        *
+        * If we read read_count and then conditionally stop the
+        * queue, it is possible for the completion path to race with
+        * us and complete all outstanding descriptors in the middle,
+        * after which there will be no more completions to wake it.
+        * Therefore we stop the queue first, then read read_count
+        * (with a memory barrier to ensure the ordering), then
+        * restart the queue if the fill level turns out to be low
+        * enough.
+        */
+       netif_tx_stop_queue(txq1->core_txq);
+       smp_mb();
+       txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+       txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+       /* Re-evaluate with the freshly-read counts */
+       fill_level = max(txq1->insert_count - txq1->old_read_count,
+                        txq2->insert_count - txq2->old_read_count);
+       EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+       if (likely(fill_level < efx->txq_stop_thresh)) {
+               smp_mb();
+               /* Loopback selftest keeps the queue stopped deliberately */
+               if (likely(!efx->loopback_selftest))
+                       netif_tx_start_queue(txq1->core_txq);
+       }
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
  * This function is split out from efx_hard_start_xmit to allow the
  * loopback test to direct packets via specific TX queues.
  *
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
-       unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+       unsigned int len, unmap_len = 0, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
-       bool unmap_single;
-       int q_space, i = 0;
-       netdev_tx_t rc = NETDEV_TX_OK;
+       unsigned short dma_flags;
+       int i = 0;
 
        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        return NETDEV_TX_OK;
        }
 
-       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-       q_space = efx->txq_entries - 1 - fill_level;
-
        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
-       unmap_single = true;
+       dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
        /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
                /* Add to TX queue, splitting across DMA boundaries */
                do {
-                       if (unlikely(q_space-- <= 0)) {
-                               /* It might be that completions have
-                                * happened since the xmit path last
-                                * checked.  Update the xmit path's
-                                * copy of read_count.
-                                */
-                               netif_tx_stop_queue(tx_queue->core_txq);
-                               /* This memory barrier protects the
-                                * change of queue state from the access
-                                * of read_count. */
-                               smp_mb();
-                               tx_queue->old_read_count =
-                                       ACCESS_ONCE(tx_queue->read_count);
-                               fill_level = (tx_queue->insert_count
-                                             - tx_queue->old_read_count);
-                               q_space = efx->txq_entries - 1 - fill_level;
-                               if (unlikely(q_space-- <= 0)) {
-                                       rc = NETDEV_TX_BUSY;
-                                       goto unwind;
-                               }
-                               smp_mb();
-                               if (likely(!efx->loopback_selftest))
-                                       netif_tx_start_queue(
-                                               tx_queue->core_txq);
-                       }
-
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
-                       efx_tsoh_free(tx_queue, buffer);
-                       EFX_BUG_ON_PARANOID(buffer->tsoh);
-                       EFX_BUG_ON_PARANOID(buffer->skb);
+                       EFX_BUG_ON_PARANOID(buffer->flags);
                        EFX_BUG_ON_PARANOID(buffer->len);
-                       EFX_BUG_ON_PARANOID(!buffer->continuation);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
                        dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
+                       buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);
 
                /* Transfer ownership of the unmapping to the final buffer */
-               buffer->unmap_single = unmap_single;
+               buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;
 
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
-               unmap_single = false;
+               dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }
 
        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
-       buffer->continuation = false;
+       buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       efx_tx_maybe_stop_queue(tx_queue);
+
        return NETDEV_TX_OK;
 
  dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);
 
- unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->len = 0;
        }
 
        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
-               if (unmap_single)
+               if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                                       DMA_TO_DEVICE);
        }
 
-       return rc;
+       return NETDEV_TX_OK;
 }
 
 /* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                }
 
                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-               buffer->continuation = true;
-               buffer->len = 0;
 
                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -366,6 +339,12 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 
        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
 
+       /* PTP "event" packet */
+       if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+           unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+               return efx_ptp_tx(efx, skb);
+       }
+
        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
@@ -450,6 +429,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
+       struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;
 
        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +437,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
-       /* See if we need to restart the netif queue.  This barrier
-        * separates the update of read_count from the test of the
-        * queue state. */
+       /* See if we need to restart the netif queue.  This memory
+        * barrier ensures that we write read_count (inside
+        * efx_dequeue_buffers()) before reading the queue status.
+        */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
-               fill_level = tx_queue->insert_count - tx_queue->read_count;
-               if (fill_level < EFX_TXQ_THRESHOLD(efx))
+               txq2 = efx_tx_queue_partner(tx_queue);
+               fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+                                txq2->insert_count - txq2->read_count);
+               if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }
 
@@ -480,11 +463,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        }
 }
 
+/* Size of page-based TSO header buffers.  Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE  128
+#define TSOH_PER_PAGE  (PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+/* Number of pages of TSOH_STD_SIZE header buffers needed to cover
+ * half of a queue of (ptr_mask + 1) descriptors, rounding up.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+       return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
+
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
-       int i, rc;
+       int rc;
 
        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +498,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
-       for (i = 0; i <= tx_queue->ptr_mask; ++i)
-               tx_queue->buffer[i].continuation = true;
+
+       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+               tx_queue->tsoh_page =
+                       kcalloc(efx_tsoh_page_count(tx_queue),
+                               sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+               if (!tx_queue->tsoh_page) {
+                       rc = -ENOMEM;
+                       goto fail1;
+               }
+       }
 
        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
-               goto fail;
+               goto fail2;
 
        return 0;
 
- fail:
+fail2:
+       kfree(tx_queue->tsoh_page);
+       tx_queue->tsoh_page = NULL;
+fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
@@ -546,8 +555,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->continuation = true;
-               buffer->len = 0;
 
                ++tx_queue->read_count;
        }
@@ -568,13 +575,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
        efx_nic_fini_tx(tx_queue);
 
        efx_release_tx_buffers(tx_queue);
-
-       /* Free up TSO header cache */
-       efx_fini_tso(tx_queue);
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       int i;
+
        if (!tx_queue->buffer)
                return;
 
@@ -582,6 +588,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);
 
+       if (tx_queue->tsoh_page) {
+               for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+                       efx_nic_free_buffer(tx_queue->efx,
+                                           &tx_queue->tsoh_page[i]);
+               kfree(tx_queue->tsoh_page);
+               tx_queue->tsoh_page = NULL;
+       }
+
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
 }
@@ -604,22 +618,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 #define TSOH_OFFSET    NET_IP_ALIGN
 #endif
 
-#define TSOH_BUFFER(tsoh)      ((u8 *)(tsoh + 1) + TSOH_OFFSET)
-
-/* Total size of struct efx_tso_header, buffer and padding */
-#define TSOH_SIZE(hdr_len)                                     \
-       (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
-
-/* Size of blocks on free list.  Larger blocks must be allocated from
- * the heap.
- */
-#define TSOH_STD_SIZE          128
-
 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
-#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
-#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
-#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
-#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
 
 /**
  * struct tso_state - TSO state for an SKB
@@ -631,10 +630,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @in_len: Remaining length in current SKB fragment
  * @unmap_len: Length of SKB fragment
  * @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
  * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
- * @full_packet_size: Number of bytes to put in each outgoing segment
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -651,11 +652,13 @@ struct tso_state {
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
-       bool unmap_single;
+       unsigned short dma_flags;
 
        __be16 protocol;
+       unsigned int ip_off;
+       unsigned int tcp_off;
        unsigned header_len;
-       int full_packet_size;
+       unsigned int ip_base_len;
 };
 
 
@@ -687,91 +690,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
        return protocol;
 }
 
-
-/*
- * Allocate a page worth of efx_tso_header structures, and string them
- * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
- */
-static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+                              struct efx_tx_buffer *buffer, unsigned int len)
 {
-       struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-       struct efx_tso_header *tsoh;
-       dma_addr_t dma_addr;
-       u8 *base_kva, *kva;
+       u8 *result;
 
-       base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
-       if (base_kva == NULL) {
-               netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
-                         "Unable to allocate page for TSO headers\n");
-               return -ENOMEM;
-       }
-
-       /* dma_alloc_coherent() allocates pages. */
-       EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
-
-       for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
-               tsoh = (struct efx_tso_header *)kva;
-               tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
-               tsoh->next = tx_queue->tso_headers_free;
-               tx_queue->tso_headers_free = tsoh;
-       }
-
-       return 0;
-}
-
-
-/* Free up a TSO header, and all others in the same page. */
-static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
-                               struct efx_tso_header *tsoh,
-                               struct device *dma_dev)
-{
-       struct efx_tso_header **p;
-       unsigned long base_kva;
-       dma_addr_t base_dma;
-
-       base_kva = (unsigned long)tsoh & PAGE_MASK;
-       base_dma = tsoh->dma_addr & PAGE_MASK;
-
-       p = &tx_queue->tso_headers_free;
-       while (*p != NULL) {
-               if (((unsigned long)*p & PAGE_MASK) == base_kva)
-                       *p = (*p)->next;
-               else
-                       p = &(*p)->next;
-       }
+       EFX_BUG_ON_PARANOID(buffer->len);
+       EFX_BUG_ON_PARANOID(buffer->flags);
+       EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-       dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
-}
+       if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+               unsigned index =
+                       (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+               struct efx_buffer *page_buf =
+                       &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+               unsigned offset =
+                       TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+
+               if (unlikely(!page_buf->addr) &&
+                   efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+                       return NULL;
+
+               result = (u8 *)page_buf->addr + offset;
+               buffer->dma_addr = page_buf->dma_addr + offset;
+               buffer->flags = EFX_TX_BUF_CONT;
+       } else {
+               tx_queue->tso_long_headers++;
 
-static struct efx_tso_header *
-efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
-{
-       struct efx_tso_header *tsoh;
-
-       tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
-       if (unlikely(!tsoh))
-               return NULL;
-
-       tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
-                                       TSOH_BUFFER(tsoh), header_len,
-                                       DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
-                                      tsoh->dma_addr))) {
-               kfree(tsoh);
-               return NULL;
+               buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+               if (unlikely(!buffer->heap_buf))
+                       return NULL;
+               result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+               buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
        }
 
-       tsoh->unmap_len = header_len;
-       return tsoh;
-}
+       buffer->len = len;
 
-static void
-efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
-{
-       dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-                        tsoh->dma_addr, tsoh->unmap_len,
-                        DMA_TO_DEVICE);
-       kfree(tsoh);
+       return result;
 }
 
 /**
@@ -781,47 +736,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
  * @len:               Length of fragment
  * @final_buffer:      The final buffer inserted into the queue
  *
- * Push descriptors onto the TX queue.  Return 0 on success or 1 if
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
  */
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
-                              dma_addr_t dma_addr, unsigned len,
-                              struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+                               dma_addr_t dma_addr, unsigned len,
+                               struct efx_tx_buffer **final_buffer)
 {
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
-       unsigned dma_len, fill_level, insert_ptr;
-       int q_space;
+       unsigned dma_len, insert_ptr;
 
        EFX_BUG_ON_PARANOID(len <= 0);
 
-       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-       /* -1 as there is no way to represent all descriptors used */
-       q_space = efx->txq_entries - 1 - fill_level;
-
        while (1) {
-               if (unlikely(q_space-- <= 0)) {
-                       /* It might be that completions have happened
-                        * since the xmit path last checked.  Update
-                        * the xmit path's copy of read_count.
-                        */
-                       netif_tx_stop_queue(tx_queue->core_txq);
-                       /* This memory barrier protects the change of
-                        * queue state from the access of read_count. */
-                       smp_mb();
-                       tx_queue->old_read_count =
-                               ACCESS_ONCE(tx_queue->read_count);
-                       fill_level = (tx_queue->insert_count
-                                     - tx_queue->old_read_count);
-                       q_space = efx->txq_entries - 1 - fill_level;
-                       if (unlikely(q_space-- <= 0)) {
-                               *final_buffer = NULL;
-                               return 1;
-                       }
-                       smp_mb();
-                       netif_tx_start_queue(tx_queue->core_txq);
-               }
-
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;
@@ -830,12 +757,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                    tx_queue->read_count >=
                                    efx->txq_entries);
 
-               efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               EFX_BUG_ON_PARANOID(!buffer->continuation);
-               EFX_BUG_ON_PARANOID(buffer->tsoh);
+               EFX_BUG_ON_PARANOID(buffer->flags);
 
                buffer->dma_addr = dma_addr;
 
@@ -845,7 +769,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                if (dma_len >= len)
                        break;
 
-               buffer->len = dma_len; /* Don't set the other members */
+               buffer->len = dma_len;
+               buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }
@@ -853,7 +778,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
-       return 0;
 }
 
 
@@ -864,54 +788,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
  * a single fragment, and we know it doesn't cross a page boundary.  It
  * also allows us to not worry about end-of-packet etc.
  */
-static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-                              struct efx_tso_header *tsoh, unsigned len)
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+                             struct efx_tx_buffer *buffer, u8 *header)
 {
-       struct efx_tx_buffer *buffer;
-
-       buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-       efx_tsoh_free(tx_queue, buffer);
-       EFX_BUG_ON_PARANOID(buffer->len);
-       EFX_BUG_ON_PARANOID(buffer->unmap_len);
-       EFX_BUG_ON_PARANOID(buffer->skb);
-       EFX_BUG_ON_PARANOID(!buffer->continuation);
-       EFX_BUG_ON_PARANOID(buffer->tsoh);
-       buffer->len = len;
-       buffer->dma_addr = tsoh->dma_addr;
-       buffer->tsoh = tsoh;
+       if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
+               buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+                                                 header, buffer->len,
+                                                 DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+                                              buffer->dma_addr))) {
+                       kfree(buffer->heap_buf);
+                       buffer->len = 0;
+                       buffer->flags = 0;
+                       return -ENOMEM;
+               }
+               buffer->unmap_len = buffer->len;
+               buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+       }
 
        ++tx_queue->insert_count;
+       return 0;
 }
 
 
-/* Remove descriptors put into a tx_queue. */
+/* Remove buffers put into a tx_queue.  None of the buffers must have
+ * an skb attached.
+ */
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_buffer *buffer;
-       dma_addr_t unmap_addr;
 
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
-               efx_tsoh_free(tx_queue, buffer);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               if (buffer->unmap_len) {
-                       unmap_addr = (buffer->dma_addr + buffer->len -
-                                     buffer->unmap_len);
-                       if (buffer->unmap_single)
-                               dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-                                                unmap_addr, buffer->unmap_len,
-                                                DMA_TO_DEVICE);
-                       else
-                               dma_unmap_page(&tx_queue->efx->pci_dev->dev,
-                                              unmap_addr, buffer->unmap_len,
-                                              DMA_TO_DEVICE);
-                       buffer->unmap_len = 0;
-               }
-               buffer->len = 0;
-               buffer->continuation = true;
+               efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
 }
 
@@ -919,17 +831,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 /* Parse the SKB header and initialise state. */
 static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
-       /* All ethernet/IP/TCP headers combined size is TCP header size
-        * plus offset of TCP header relative to start of packet.
-        */
-       st->header_len = ((tcp_hdr(skb)->doff << 2u)
-                         + PTR_DIFF(tcp_hdr(skb), skb->data));
-       st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
-
-       if (st->protocol == htons(ETH_P_IP))
+       st->ip_off = skb_network_header(skb) - skb->data;
+       st->tcp_off = skb_transport_header(skb) - skb->data;
+       st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       if (st->protocol == htons(ETH_P_IP)) {
+               st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
-       else
+       } else {
+               st->ip_base_len = st->header_len - st->tcp_off;
                st->ipv4_id = 0;
+       }
        st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +849,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 
        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
-       st->unmap_single = false;
+       st->dma_flags = 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +858,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = false;
+               st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
@@ -965,7 +876,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
                                        len, DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = true;
+               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
@@ -982,20 +893,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
  * @st:                        TSO state
  *
  * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
  */
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-                                        const struct sk_buff *skb,
-                                        struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+                                         const struct sk_buff *skb,
+                                         struct tso_state *st)
 {
        struct efx_tx_buffer *buffer;
-       int n, end_of_packet, rc;
+       int n;
 
        if (st->in_len == 0)
-               return 0;
+               return;
        if (st->packet_space == 0)
-               return 0;
+               return;
 
        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +916,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
        st->out_len -= n;
        st->in_len -= n;
 
-       rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-       if (likely(rc == 0)) {
-               if (st->out_len == 0)
-                       /* Transfer ownership of the skb */
-                       buffer->skb = skb;
+       efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
 
-               end_of_packet = st->out_len == 0 || st->packet_space == 0;
-               buffer->continuation = !end_of_packet;
+       if (st->out_len == 0) {
+               /* Transfer ownership of the skb */
+               buffer->skb = skb;
+               buffer->flags = EFX_TX_BUF_SKB;
+       } else if (st->packet_space != 0) {
+               buffer->flags = EFX_TX_BUF_CONT;
+       }
 
-               if (st->in_len == 0) {
-                       /* Transfer ownership of the DMA mapping */
-                       buffer->unmap_len = st->unmap_len;
-                       buffer->unmap_single = st->unmap_single;
-                       st->unmap_len = 0;
-               }
+       if (st->in_len == 0) {
+               /* Transfer ownership of the DMA mapping */
+               buffer->unmap_len = st->unmap_len;
+               buffer->flags |= st->dma_flags;
+               st->unmap_len = 0;
        }
 
        st->dma_addr += n;
-       return rc;
 }
 
 
@@ -1035,36 +944,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
  * @st:                        TSO state
  *
  * Generate a new header and prepare for the new packet.  Return 0 on
- * success, or -1 if failed to alloc header.
+ * success, or -%ENOMEM if failed to alloc header.
  */
 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
 {
-       struct efx_tso_header *tsoh;
+       struct efx_tx_buffer *buffer =
+               &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;
+       int rc;
 
-       /* Allocate a DMA-mapped header buffer. */
-       if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
-               if (tx_queue->tso_headers_free == NULL) {
-                       if (efx_tsoh_block_alloc(tx_queue))
-                               return -1;
-               }
-               EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
-               tsoh = tx_queue->tso_headers_free;
-               tx_queue->tso_headers_free = tsoh->next;
-               tsoh->unmap_len = 0;
-       } else {
-               tx_queue->tso_long_headers++;
-               tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
-               if (unlikely(!tsoh))
-                       return -1;
-       }
+       /* Allocate and insert a DMA-mapped header buffer. */
+       header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+       if (!header)
+               return -ENOMEM;
 
-       header = TSOH_BUFFER(tsoh);
-       tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+       tsoh_th = (struct tcphdr *)(header + st->tcp_off);
 
        /* Copy and update the headers. */
        memcpy(header, skb->data, st->header_len);
@@ -1073,19 +971,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
-               ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+               st->packet_space = skb_shinfo(skb)->gso_size;
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
-               ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+               st->packet_space = st->out_len;
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }
+       ip_length = st->ip_base_len + st->packet_space;
 
        if (st->protocol == htons(ETH_P_IP)) {
-               struct iphdr *tsoh_iph =
-                       (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+               struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
 
                tsoh_iph->tot_len = htons(ip_length);
 
@@ -1094,16 +992,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                st->ipv4_id++;
        } else {
                struct ipv6hdr *tsoh_iph =
-                       (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+                       (struct ipv6hdr *)(header + st->ip_off);
 
-               tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+               tsoh_iph->payload_len = htons(ip_length);
        }
 
-       st->packet_space = skb_shinfo(skb)->gso_size;
-       ++tx_queue->tso_packets;
+       rc = efx_tso_put_header(tx_queue, buffer, header);
+       if (unlikely(rc))
+               return rc;
 
-       /* Form a descriptor for this header. */
-       efx_tso_put_header(tx_queue, tsoh, st->header_len);
+       ++tx_queue->tso_packets;
 
        return 0;
 }
@@ -1118,13 +1016,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
  *
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
 {
        struct efx_nic *efx = tx_queue->efx;
-       int frag_i, rc, rc2 = NETDEV_TX_OK;
+       int frag_i, rc;
        struct tso_state state;
 
        /* Find the packet protocol and sanity-check it */
@@ -1156,11 +1054,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                goto mem_err;
 
        while (1) {
-               rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-               if (unlikely(rc)) {
-                       rc2 = NETDEV_TX_BUSY;
-                       goto unwind;
-               }
+               tso_fill_packet_with_fragment(tx_queue, skb, &state);
 
                /* Move onto the next fragment? */
                if (state.in_len == 0) {
@@ -1184,6 +1078,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       efx_tx_maybe_stop_queue(tx_queue);
+
        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;
 
@@ -1192,10 +1088,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                  "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);
 
- unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
-               if (state.unmap_single)
+               if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
@@ -1204,25 +1099,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        }
 
        efx_enqueue_unwind(tx_queue);
-       return rc2;
-}
-
-
-/*
- * Free up all TSO datastructures associated with tx_queue. This
- * routine should be called only once the tx_queue is both empty and
- * will no longer be used.
- */
-static void efx_fini_tso(struct efx_tx_queue *tx_queue)
-{
-       unsigned i;
-
-       if (tx_queue->buffer) {
-               for (i = 0; i <= tx_queue->ptr_mask; ++i)
-                       efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
-       }
-
-       while (tx_queue->tso_headers_free != NULL)
-               efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-                                   &tx_queue->efx->pci_dev->dev);
+       return NETDEV_TX_OK;
 }
index ade108232048aca7771e2a3d0ea9d6d5a15f7a45..0376a5e6b2bf9c0584e45bf2e34c6f5ee8608033 100644 (file)
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
        new_bus->write = &stmmac_mdio_write;
        new_bus->reset = &stmmac_mdio_reset;
        snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               new_bus->name, mdio_bus_data->bus_id);
+               new_bus->name, priv->plat->bus_id);
        new_bus->priv = ndev;
        new_bus->irq = irqlist;
        new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
                         * and no PHY number was provided to the MAC,
                         * use the one probed here.
                         */
-                       if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
-                           (priv->plat->phy_addr == -1))
+                       if (priv->plat->phy_addr == -1)
                                priv->plat->phy_addr = addr;
 
-                       act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
-                               (priv->plat->phy_addr == addr);
+                       act = (priv->plat->phy_addr == addr);
                        switch (phydev->irq) {
                        case PHY_POLL:
                                irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
 {
        struct stmmac_priv *priv = netdev_priv(ndev);
 
+       if (!priv->mii)
+               return 0;
+
        mdiobus_unregister(priv->mii);
        priv->mii->priv = NULL;
        mdiobus_free(priv->mii);
index 13afb8edfadca49892f068e22554855dc930aa49..1f069b0f6af592342ecc1f6a023b9334acc4099b 100644 (file)
@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
        plat_dat.has_gmac = 1;
        plat_dat.force_sf_dma_mode = 1;
 
-       mdio_data.bus_id = 1;
        mdio_data.phy_reset = NULL;
        mdio_data.phy_mask = 0;
        plat_dat.mdio_bus_data = &mdio_data;
index b93245c11995bc15329321e6879be78d4c1b44dc..ed112b55ae7f95a587150a0f5ed23d58ca318f97 100644 (file)
@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 {
        int ret = 0;
        struct resource *res;
+       struct device *dev = &pdev->dev;
        void __iomem *addr = NULL;
        struct stmmac_priv *priv = NULL;
        struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        if (!res)
                return -ENODEV;
 
-       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-               pr_err("%s: ERROR: memory allocation failed"
-                      "cannot get the I/O addr 0x%x\n",
-                      __func__, (unsigned int)res->start);
-               return -EBUSY;
-       }
-
-       addr = ioremap(res->start, resource_size(res));
+       addr = devm_request_and_ioremap(dev, res);
        if (!addr) {
                pr_err("%s: ERROR: memory mapping failed", __func__);
-               ret = -ENOMEM;
-               goto out_release_region;
+               return -ENOMEM;
        }
 
        if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
                                        GFP_KERNEL);
                if (!plat_dat) {
                        pr_err("%s: ERROR: no memory", __func__);
-                       ret = -ENOMEM;
-                       goto out_unmap;
+                       return  -ENOMEM;
                }
 
                ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
                if (ret) {
                        pr_err("%s: main dt probe failed", __func__);
-                       goto out_unmap;
+                       return ret;
                }
        } else {
                plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        if (plat_dat->init) {
                ret = plat_dat->init(pdev);
                if (unlikely(ret))
-                       goto out_unmap;
+                       return ret;
        }
 
        priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
        if (!priv) {
                pr_err("%s: main driver probe failed", __func__);
-               goto out_unmap;
+               return -ENODEV;
        }
 
        /* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        if (priv->dev->irq == -ENXIO) {
                pr_err("%s: ERROR: MAC IRQ configuration "
                       "information not found\n", __func__);
-               ret = -ENXIO;
-               goto out_unmap;
+               return -ENXIO;
        }
 
        /*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        pr_debug("STMMAC platform driver registration completed");
 
        return 0;
-
-out_unmap:
-       iounmap(addr);
-       platform_set_drvdata(pdev, NULL);
-
-out_release_region:
-       release_mem_region(res->start, resource_size(res));
-
-       return ret;
 }
 
 /**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
-       struct resource *res;
        int ret = stmmac_dvr_remove(ndev);
 
        if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       iounmap((void __force __iomem *)priv->ioaddr);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
        return ret;
 }
 
index 967fe8cb476e56eda49013963a347f262712536d..c9c977bf02ace45e1e4be1b863d682995d39185f 100644 (file)
@@ -212,7 +212,6 @@ static void bigmac_clean_rings(struct bigmac *bp)
 static void bigmac_init_rings(struct bigmac *bp, int from_irq)
 {
        struct bmac_init_block *bb = bp->bmac_block;
-       struct net_device *dev = bp->dev;
        int i;
        gfp_t gfp_flags = GFP_KERNEL;
 
index 1b173a6145d642fb3c2fe26c58c8d8b995ce50b8..b26cbda5efa9b5264dd4e2bb885d35ea7e26b692 100644 (file)
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
 
 config TI_DAVINCI_MDIO
        tristate "TI DaVinci MDIO Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
        select PHYLIB
        ---help---
          This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
 
 config TI_DAVINCI_CPDMA
        tristate "TI DaVinci CPDMA Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
        ---help---
          This driver supports TI's DaVinci CPDMA dma engine.
 
index 1e5d85b06e71b1dea196be8e0f4db1a928c62c4b..df55e240374646e2f076d9b4764fab6dc0a07bab 100644 (file)
@@ -28,6 +28,9 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
 
 #include <linux/platform_data/cpsw.h>
 
@@ -383,6 +386,11 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
                        mac_control |= BIT(7);  /* GIGABITEN    */
                if (phy->duplex)
                        mac_control |= BIT(0);  /* FULLDUPLEXEN */
+
+               /* set speed_in input in case RMII mode is used in 100Mbps */
+               if (phy->speed == 100)
+                       mac_control |= BIT(15);
+
                *link = true;
        } else {
                mac_control = 0;
@@ -709,6 +717,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
        slave->sliver   = regs + data->sliver_reg_ofs;
 }
 
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+                        struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct device_node *slave_node;
+       int i = 0, ret;
+       u32 prop;
+
+       if (!node)
+               return -EINVAL;
+
+       if (of_property_read_u32(node, "slaves", &prop)) {
+               pr_err("Missing slaves property in the DT.\n");
+               return -EINVAL;
+       }
+       data->slaves = prop;
+
+       data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+                                  data->slaves, GFP_KERNEL);
+       if (!data->slave_data) {
+               pr_err("Could not allocate slave memory.\n");
+               return -EINVAL;
+       }
+
+       data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
+
+       if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+               pr_err("Missing cpdma_channels property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->channels = prop;
+
+       if (of_property_read_u32(node, "host_port_no", &prop)) {
+               pr_err("Missing host_port_no property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->host_port_num = prop;
+
+       if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
+               pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpdma_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
+               pr_err("Missing cpdma_sram_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpdma_sram_ofs = prop;
+
+       if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
+               pr_err("Missing ale_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->ale_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "ale_entries", &prop)) {
+               pr_err("Missing ale_entries property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->ale_entries = prop;
+
+       if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
+               pr_err("Missing host_port_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->host_port_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
+               pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->hw_stats_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
+               pr_err("Missing bd_ram_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->bd_ram_ofs = prop;
+
+       if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+               pr_err("Missing bd_ram_size property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->bd_ram_size = prop;
+
+       if (of_property_read_u32(node, "rx_descs", &prop)) {
+               pr_err("Missing rx_descs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->rx_descs = prop;
+
+       if (of_property_read_u32(node, "mac_control", &prop)) {
+               pr_err("Missing mac_control property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->mac_control = prop;
+
+       for_each_child_of_node(node, slave_node) {
+               struct cpsw_slave_data *slave_data = data->slave_data + i;
+               const char *phy_id = NULL;
+               const void *mac_addr = NULL;
+
+               if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+                       pr_err("Missing slave[%d] phy_id property\n", i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->phy_id = phy_id;
+
+               if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
+                       pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->slave_reg_ofs = prop;
+
+               if (of_property_read_u32(slave_node, "sliver_reg_ofs",
+                                        &prop)) {
+                       pr_err("Missing slave[%d] sliver_reg_ofs property\n",
+                               i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->sliver_reg_ofs = prop;
+
+               mac_addr = of_get_mac_address(slave_node);
+               if (mac_addr)
+                       memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+
+               i++;
+       }
+
+       return 0;
+
+error_ret:
+       kfree(data->slave_data);
+       return ret;
+}
+
 static int __devinit cpsw_probe(struct platform_device *pdev)
 {
        struct cpsw_platform_data       *data = pdev->dev.platform_data;
@@ -720,11 +880,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        struct resource                 *res;
        int ret = 0, i, k = 0;
 
-       if (!data) {
-               pr_err("platform data missing\n");
-               return -ENODEV;
-       }
-
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
                pr_err("error allocating net_device\n");
@@ -734,13 +889,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ndev);
        priv = netdev_priv(ndev);
        spin_lock_init(&priv->lock);
-       priv->data = *data;
        priv->pdev = pdev;
        priv->ndev = ndev;
        priv->dev  = &ndev->dev;
        priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
        priv->rx_packet_max = max(rx_packet_max, 128);
 
+       if (cpsw_probe_dt(&priv->data, pdev)) {
+               pr_err("cpsw: platform data missing\n");
+               ret = -ENODEV;
+               goto clean_ndev_ret;
+       }
+       data = &priv->data;
+
        if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
                memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
                pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1157,17 @@ static const struct dev_pm_ops cpsw_pm_ops = {
        .resume         = cpsw_resume,
 };
 
+static const struct of_device_id cpsw_of_mtable[] = {
+       { .compatible = "ti,cpsw", },
+       { /* sentinel */ },
+};
+
 static struct platform_driver cpsw_driver = {
        .driver = {
                .name    = "cpsw",
                .owner   = THIS_MODULE,
                .pm      = &cpsw_pm_ops,
+               .of_match_table = of_match_ptr(cpsw_of_mtable),
        },
        .probe = cpsw_probe,
        .remove = __devexit_p(cpsw_remove),
index a9ca4a03d31b2fe2adc68818da636edb8d5aeb2b..51a96dbee9accbae9dbe32e8eb3d81a367819307 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 /*
  * This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
        return 0;
 }
 
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+                        struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       u32 prop;
+
+       if (!node)
+               return -EINVAL;
+
+       if (of_property_read_u32(node, "bus_freq", &prop)) {
+               pr_err("Missing bus_freq property in the DT.\n");
+               return -EINVAL;
+       }
+       data->bus_freq = prop;
+
+       return 0;
+}
+
+
 static int __devinit davinci_mdio_probe(struct platform_device *pdev)
 {
        struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       data->pdata = pdata ? (*pdata) : default_pdata;
-
        data->bus = mdiobus_alloc();
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
                goto bail_out;
        }
 
+       if (dev->of_node) {
+               if (davinci_mdio_probe_dt(&data->pdata, pdev))
+                       data->pdata = default_pdata;
+               snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+       } else {
+               data->pdata = pdata ? (*pdata) : default_pdata;
+               snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+                        pdev->name, pdev->id);
+       }
+
        data->bus->name         = dev_name(dev);
        data->bus->read         = davinci_mdio_read,
        data->bus->write        = davinci_mdio_write,
        data->bus->reset        = davinci_mdio_reset,
        data->bus->parent       = dev;
        data->bus->priv         = data;
-       snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               pdev->name, pdev->id);
 
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
@@ -456,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
        .resume         = davinci_mdio_resume,
 };
 
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+       { .compatible = "ti,davinci_mdio", },
+       { /* sentinel */ },
+};
+
 static struct platform_driver davinci_mdio_driver = {
        .driver = {
                .name    = "davinci_mdio",
                .owner   = THIS_MODULE,
                .pm      = &davinci_mdio_pm_ops,
+               .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
        },
        .probe = davinci_mdio_probe,
        .remove = __devexit_p(davinci_mdio_remove),
index 277c93e9ff4d7201470fd0fdea50a691c28f24de..8fa947a2d9290f2dd7dc612968199aa0f46fdde1 100644 (file)
@@ -1358,7 +1358,6 @@ static int tsi108_open(struct net_device *dev)
                        break;
                }
 
-               data->rxskbs[i] = skb;
                data->rxskbs[i] = skb;
                data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
                data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
index a5826a3111a6ed7f1570e763c2ef36ce98ddb175..2c08bf6e7bf3b326f828585a3a8fefc96d62ddb3 100644 (file)
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
        if (data && is_valid_ether_addr(data->mac_addr)) {
                memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
        } else {
-               eth_random_addr(ndev->dev_addr);
-               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+               eth_hw_addr_random(ndev);
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index bdd8891c215ad3c1b4ca4fa6b5ae6ddc3724a7df..88943d90c7653565283a064e3a8ed25f65d5d213 100644 (file)
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
        if (data && is_valid_ether_addr(data->mac_addr)) {
                memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
        } else {
-               eth_random_addr(ndev->dev_addr);
-               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+               eth_hw_addr_random(ndev);
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 95ceb35930437be5103e23ac99b83becfdd669bc..5fd6f4674326f0d3236ae8a97127b842ffffa008 100644 (file)
@@ -35,6 +35,7 @@ struct hv_netvsc_packet;
 /* Represent the xfer page packet which contains 1 or more netvsc packet */
 struct xferpage_packet {
        struct list_head list_ent;
+       u32 status;
 
        /* # of netvsc packets this xfer packet contains */
        u32 count;
@@ -47,6 +48,7 @@ struct xferpage_packet {
 struct hv_netvsc_packet {
        /* Bookkeeping stuff */
        struct list_head list_ent;
+       u32 status;
 
        struct hv_device *device;
        bool is_data_pkt;
@@ -465,8 +467,6 @@ struct nvsp_message {
 
 #define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
-#define NETVSC_RECEIVE_SG_COUNT                        1
-
 /* Preallocated receive packets */
 #define NETVSC_RECEIVE_PACKETLIST_COUNT                256
 
index 4a1a5f58fa73ffd7899429bcaf28062e29f83dc3..1cd77483da50114c2c309ab70a69dff134c3a919 100644 (file)
@@ -558,7 +558,7 @@ int netvsc_send(struct hv_device *device,
 }
 
 static void netvsc_send_recv_completion(struct hv_device *device,
-                                       u64 transaction_id)
+                                       u64 transaction_id, u32 status)
 {
        struct nvsp_message recvcompMessage;
        int retries = 0;
@@ -571,9 +571,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
        recvcompMessage.hdr.msg_type =
                                NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
 
-       /* FIXME: Pass in the status */
-       recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
-               NVSP_STAT_SUCCESS;
+       recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
 
 retry_send_cmplt:
        /* Send the completion */
@@ -613,6 +611,7 @@ static void netvsc_receive_completion(void *context)
        bool fsend_receive_comp = false;
        unsigned long flags;
        struct net_device *ndev;
+       u32 status = NVSP_STAT_NONE;
 
        /*
         * Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -627,6 +626,9 @@ static void netvsc_receive_completion(void *context)
        /* Overloading use of the lock. */
        spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
 
+       if (packet->status != NVSP_STAT_SUCCESS)
+               packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
+
        packet->xfer_page_pkt->count--;
 
        /*
@@ -636,6 +638,7 @@ static void netvsc_receive_completion(void *context)
        if (packet->xfer_page_pkt->count == 0) {
                fsend_receive_comp = true;
                transaction_id = packet->completion.recv.recv_completion_tid;
+               status = packet->xfer_page_pkt->status;
                list_add_tail(&packet->xfer_page_pkt->list_ent,
                              &net_device->recv_pkt_list);
 
@@ -647,7 +650,7 @@ static void netvsc_receive_completion(void *context)
 
        /* Send a receive completion for the xfer page packet */
        if (fsend_receive_comp)
-               netvsc_send_recv_completion(device, transaction_id);
+               netvsc_send_recv_completion(device, transaction_id, status);
 
 }
 
@@ -736,7 +739,8 @@ static void netvsc_receive(struct hv_device *device,
                                       flags);
 
                netvsc_send_recv_completion(device,
-                                           vmxferpage_packet->d.trans_id);
+                                           vmxferpage_packet->d.trans_id,
+                                           NVSP_STAT_FAIL);
 
                return;
        }
@@ -744,6 +748,7 @@ static void netvsc_receive(struct hv_device *device,
        /* Remove the 1st packet to represent the xfer page packet itself */
        xferpage_packet = (struct xferpage_packet *)listHead.next;
        list_del(&xferpage_packet->list_ent);
+       xferpage_packet->status = NVSP_STAT_SUCCESS;
 
        /* This is how much we can satisfy */
        xferpage_packet->count = count - 1;
@@ -760,6 +765,7 @@ static void netvsc_receive(struct hv_device *device,
                list_del(&netvsc_packet->list_ent);
 
                /* Initialize the netvsc packet */
+               netvsc_packet->status = NVSP_STAT_SUCCESS;
                netvsc_packet->xfer_page_pkt = xferpage_packet;
                netvsc_packet->completion.recv.recv_completion =
                                        netvsc_receive_completion;
@@ -904,9 +910,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
        INIT_LIST_HEAD(&net_device->recv_pkt_list);
 
        for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
-               packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-                                (NETVSC_RECEIVE_SG_COUNT *
-                                 sizeof(struct hv_page_buffer)), GFP_KERNEL);
+               packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
                if (!packet)
                        break;
 
index 8c5a1c43c81d257c09a67385e01da93f3c24465f..f825a629a699cfe5fac73803353da4b47ca18974 100644 (file)
@@ -265,6 +265,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        if (!net) {
                netdev_err(net, "got receive callback but net device"
                        " not initialized yet\n");
+               packet->status = NVSP_STAT_FAIL;
                return 0;
        }
 
@@ -272,6 +273,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
+               packet->status = NVSP_STAT_FAIL;
                return 0;
        }
 
@@ -400,7 +402,7 @@ static void netvsc_send_garp(struct work_struct *w)
        ndev_ctx = container_of(w, struct net_device_context, dwork.work);
        net_device = hv_get_drvdata(ndev_ctx->device_ctx);
        net = net_device->ndev;
-       netif_notify_peers(net);
+       netdev_notify_peers(net);
 }
 
 
index 1e88a1095934a6d7b62c84cf1fff0e083f088881..928148cc32207a90da8cf173258c0ef8cc2b7e36 100644 (file)
 #include "hyperv_net.h"
 
 
+#define RNDIS_EXT_LEN 100
 struct rndis_request {
        struct list_head list_ent;
        struct completion  wait_event;
 
+       struct rndis_message response_msg;
        /*
-        * FIXME: We assumed a fixed size response here. If we do ever need to
-        * handle a bigger response, we can either define a max response
-        * message or add a response buffer variable above this field
+        * The buffer for extended info after the RNDIS response message. It's
+        * referenced based on the data offset in the RNDIS message. Its size
+        * is enough for current needs, and should be sufficient for the near
+        * future.
         */
-       struct rndis_message response_msg;
+       u8 response_ext[RNDIS_EXT_LEN];
 
        /* Simplify allocation by having a netvsc packet inline */
        struct hv_netvsc_packet pkt;
-       struct hv_page_buffer buf;
-       /* FIXME: We assumed a fixed size request here. */
+       /* Set 2 pages for rndis requests crossing page boundary */
+       struct hv_page_buffer buf[2];
+
        struct rndis_message request_msg;
-       u8 ext[100];
+       /*
+        * The buffer for the extended info after the RNDIS request message.
+        * It is referenced and sized in a similar way as response_ext.
+        */
+       u8 request_ext[RNDIS_EXT_LEN];
 };
 
 static void rndis_filter_send_completion(void *ctx);
@@ -221,6 +229,18 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        packet->page_buf[0].offset =
                (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
 
+       /* Add one page_buf when request_msg crossing page boundary */
+       if (packet->page_buf[0].offset + packet->page_buf[0].len > PAGE_SIZE) {
+               packet->page_buf_cnt++;
+               packet->page_buf[0].len = PAGE_SIZE -
+                       packet->page_buf[0].offset;
+               packet->page_buf[1].pfn = virt_to_phys((void *)&req->request_msg
+                       + packet->page_buf[0].len) >> PAGE_SHIFT;
+               packet->page_buf[1].offset = 0;
+               packet->page_buf[1].len = req->request_msg.msg_len -
+                       packet->page_buf[0].len;
+       }
+
        packet->completion.send.send_completion_ctx = req;/* packet; */
        packet->completion.send.send_completion =
                rndis_filter_send_request_completion;
@@ -255,7 +275,8 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
        spin_unlock_irqrestore(&dev->request_lock, flags);
 
        if (found) {
-               if (resp->msg_len <= sizeof(struct rndis_message)) {
+               if (resp->msg_len <=
+                   sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
                        memcpy(&request->response_msg, resp,
                               resp->msg_len);
                } else {
@@ -392,9 +413,12 @@ int rndis_filter_receive(struct hv_device *dev,
        struct rndis_device *rndis_dev;
        struct rndis_message *rndis_msg;
        struct net_device *ndev;
+       int ret = 0;
 
-       if (!net_dev)
-               return -EINVAL;
+       if (!net_dev) {
+               ret = -EINVAL;
+               goto exit;
+       }
 
        ndev = net_dev->ndev;
 
@@ -402,14 +426,16 @@ int rndis_filter_receive(struct hv_device *dev,
        if (!net_dev->extension) {
                netdev_err(ndev, "got rndis message but no rndis device - "
                          "dropping this message!\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto exit;
        }
 
        rndis_dev = (struct rndis_device *)net_dev->extension;
        if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
                netdev_err(ndev, "got rndis message but rndis device "
                           "uninitialized...dropping this message!\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto exit;
        }
 
        rndis_msg = pkt->data;
@@ -441,7 +467,11 @@ int rndis_filter_receive(struct hv_device *dev,
                break;
        }
 
-       return 0;
+exit:
+       if (ret != 0)
+               pkt->status = NVSP_STAT_FAIL;
+
+       return ret;
 }
 
 static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -641,6 +671,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        if (t == 0) {
                netdev_err(ndev,
                        "timeout before we got a set response...\n");
+               ret = -ETIMEDOUT;
                /*
                 * We can't deallocate the request since we may still receive a
                 * send completion for it.
@@ -678,8 +709,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
        init = &request->request_msg.msg.init_req;
        init->major_ver = RNDIS_MAJOR_VERSION;
        init->minor_ver = RNDIS_MINOR_VERSION;
-       /* FIXME: Use 1536 - rounded ethernet frame size */
-       init->max_xfer_size = 2048;
+       init->max_xfer_size = 0x4000;
 
        dev->state = RNDIS_DEV_INITIALIZING;
 
similarity index 76%
rename from drivers/ieee802154/Kconfig
rename to drivers/net/ieee802154/Kconfig
index 1fc4eefc20edb680991b4ef2d3e2d4f1caab49b9..08ae4655423a6b8f7deeee92d588fa38f0ec5cf4 100644 (file)
@@ -34,3 +34,14 @@ config IEEE802154_AT86RF230
         depends on IEEE802154_DRIVERS && MAC802154
         tristate "AT86RF230/231 transceiver driver"
         depends on SPI
+
+config IEEE802154_MRF24J40
+       tristate "Microchip MRF24J40 transceiver driver"
+       depends on IEEE802154_DRIVERS && MAC802154
+       depends on SPI
+       ---help---
+         Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         the module will be called 'mrf24j40'.
similarity index 74%
rename from drivers/ieee802154/Makefile
rename to drivers/net/ieee802154/Makefile
index 4f4371d3aa7d7f87220fea548651933c1a727333..abb0c08decb0f89a623921bab13d223f4acb56bd 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
 obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
 obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
+obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
similarity index 98%
rename from drivers/ieee802154/at86rf230.c
rename to drivers/net/ieee802154/at86rf230.c
index 5d309408395dcd7175021d4b161cb2f33ad735d1..ba753d87a32f5120a8ebf1a8e8641273259db65f 100644 (file)
@@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
        .resume     = at86rf230_resume,
 };
 
-static int __init at86rf230_init(void)
-{
-       return spi_register_driver(&at86rf230_driver);
-}
-module_init(at86rf230_init);
-
-static void __exit at86rf230_exit(void)
-{
-       spi_unregister_driver(&at86rf230_driver);
-}
-module_exit(at86rf230_exit);
+module_spi_driver(at86rf230_driver);
 
 MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
 MODULE_LICENSE("GPL v2");
similarity index 99%
rename from drivers/ieee802154/fakehard.c
rename to drivers/net/ieee802154/fakehard.c
index 73d453159408d62725e81317001889071211f094..7d39add7d467712f8cb21ec9aee4d01cd6606c7f 100644 (file)
@@ -446,4 +446,3 @@ static __exit void fake_exit(void)
 module_init(fake_init);
 module_exit(fake_exit);
 MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
new file mode 100644 (file)
index 0000000..0e53d4f
--- /dev/null
@@ -0,0 +1,767 @@
+/*
+ * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2012 Alan Ott <alan@signal11.us>
+ *                    Signal 11 Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <net/wpan-phy.h>
+#include <net/mac802154.h>
+
+/* MRF24J40 Short Address Registers */
+#define REG_RXMCR    0x00  /* Receive MAC control */
+#define REG_PANIDL   0x01  /* PAN ID (low) */
+#define REG_PANIDH   0x02  /* PAN ID (high) */
+#define REG_SADRL    0x03  /* Short address (low) */
+#define REG_SADRH    0x04  /* Short address (high) */
+#define REG_EADR0    0x05  /* Long address (low) (high is EADR7) */
+#define REG_TXMCR    0x11  /* Transmit MAC control */
+#define REG_PACON0   0x16  /* Power Amplifier Control */
+#define REG_PACON1   0x17  /* Power Amplifier Control */
+#define REG_PACON2   0x18  /* Power Amplifier Control */
+#define REG_TXNCON   0x1B  /* Transmit Normal FIFO Control */
+#define REG_TXSTAT   0x24  /* TX MAC Status Register */
+#define REG_SOFTRST  0x2A  /* Soft Reset */
+#define REG_TXSTBL   0x2E  /* TX Stabilization */
+#define REG_INTSTAT  0x31  /* Interrupt Status */
+#define REG_INTCON   0x32  /* Interrupt Control */
+#define REG_RFCTL    0x36  /* RF Control Mode Register */
+#define REG_BBREG1   0x39  /* Baseband Registers */
+#define REG_BBREG2   0x3A  /* */
+#define REG_BBREG6   0x3E  /* */
+#define REG_CCAEDTH  0x3F  /* Energy Detection Threshold */
+
+/* MRF24J40 Long Address Registers */
+#define REG_RFCON0     0x200  /* RF Control Registers */
+#define REG_RFCON1     0x201
+#define REG_RFCON2     0x202
+#define REG_RFCON3     0x203
+#define REG_RFCON5     0x205
+#define REG_RFCON6     0x206
+#define REG_RFCON7     0x207
+#define REG_RFCON8     0x208
+#define REG_RSSI       0x210
+#define REG_SLPCON0    0x211  /* Sleep Clock Control Registers */
+#define REG_SLPCON1    0x220
+#define REG_WAKETIMEL  0x222  /* Wake-up Time Match Value Low */
+#define REG_WAKETIMEH  0x223  /* Wake-up Time Match Value High */
+#define REG_RX_FIFO    0x300  /* Receive FIFO */
+
+/* Device configuration: Only channels 11-26 on page 0 are supported. */
+#define MRF24J40_CHAN_MIN 11
+#define MRF24J40_CHAN_MAX 26
+#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \
+                     - ((u32)1 << MRF24J40_CHAN_MIN))
+
+#define TX_FIFO_SIZE 128 /* From datasheet */
+#define RX_FIFO_SIZE 144 /* From datasheet */
+#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
+
+/* Device Private Data */
+struct mrf24j40 {
+       struct spi_device *spi;
+       struct ieee802154_dev *dev;
+
+       struct mutex buffer_mutex; /* only used to protect buf */
+       struct completion tx_complete;
+       struct work_struct irqwork;
+       u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
+};
+
+/* Read/Write SPI Commands for Short and Long Address registers. */
+#define MRF24J40_READSHORT(reg) ((reg) << 1)
+#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
+#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
+#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
+
+/* Maximum speed to run the device at. TODO: Get the real max value from
+ * someone at Microchip since it isn't in the datasheet. */
+#define MAX_SPI_SPEED_HZ 1000000
+
+#define printdev(X) (&X->spi->dev)
+
+static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
+{
+       int ret;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 2,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = MRF24J40_WRITESHORT(reg);
+       devrec->buf[1] = value;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI write Failed for short register 0x%hhx\n", reg);
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
+{
+       int ret = -1;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 2,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = MRF24J40_READSHORT(reg);
+       devrec->buf[1] = 0;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI read Failed for short register 0x%hhx\n", reg);
+       else
+               *val = devrec->buf[1];
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
+{
+       int ret;
+       u16 cmd;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 3,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       cmd = MRF24J40_READLONG(reg);
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = cmd >> 8 & 0xff;
+       devrec->buf[1] = cmd & 0xff;
+       devrec->buf[2] = 0;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI read Failed for long register 0x%hx\n", reg);
+       else
+               *value = devrec->buf[2];
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
+{
+       int ret;
+       u16 cmd;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 3,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       cmd = MRF24J40_WRITELONG(reg);
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = cmd >> 8 & 0xff;
+       devrec->buf[1] = cmd & 0xff;
+       devrec->buf[2] = val;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI write Failed for long register 0x%hx\n", reg);
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
/* This function relies on an undocumented write method. Once a write command
   and address is set, as many bytes of data as desired can be clocked into
   the device. The datasheet only shows setting one byte at a time. */
/* NOTE(review): 'lengths' is an on-stack SPI tx buffer and 'data' is
 * caller-supplied; SPI transfer buffers are generally expected to be
 * DMA-safe (kmalloc'd). This works with PIO masters -- confirm for DMA
 * capable controllers before relying on it. */
static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
			const u8 *data, size_t length)
{
	int ret;
	u16 cmd;
	u8 lengths[2];
	struct spi_message msg;
	/* Three chained transfers: 2-byte long-write command, the 2-byte
	 * header/frame length fields, then the payload itself. */
	struct spi_transfer addr_xfer = {
		.len = 2,
		.tx_buf = devrec->buf,
	};
	struct spi_transfer lengths_xfer = {
		.len = 2,
		.tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
	};
	struct spi_transfer data_xfer = {
		.len = length,
		.tx_buf = data,
	};

	/* Range check the length. 2 bytes are used for the length fields.*/
	if (length > TX_FIFO_SIZE-2) {
		dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n");
		length = TX_FIFO_SIZE-2;
	}

	spi_message_init(&msg);
	spi_message_add_tail(&addr_xfer, &msg);
	spi_message_add_tail(&lengths_xfer, &msg);
	spi_message_add_tail(&data_xfer, &msg);

	cmd = MRF24J40_WRITELONG(reg);
	mutex_lock(&devrec->buffer_mutex);
	devrec->buf[0] = cmd >> 8 & 0xff;
	devrec->buf[1] = cmd & 0xff;
	lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
	lengths[1] = length; /* Total length */

	ret = spi_sync(devrec->spi, &msg);
	if (ret)
		dev_err(printdev(devrec), "SPI write Failed for TX buf\n");

	mutex_unlock(&devrec->buffer_mutex);
	return ret;
}
+
/* Read one received frame out of the chip's RX FIFO.
 * data: destination buffer.
 * *len: in = capacity of 'data'; out = number of payload bytes read.
 * *lqi: link quality byte reported by the chip after the payload.
 * Returns 0 or a negative errno.
 * NOTE(review): 'addr' and 'lqi_rssi' are on-stack SPI buffers; SPI
 * transfer buffers are generally expected to be DMA-safe -- confirm for
 * DMA-capable masters. */
static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
				u8 *data, u8 *len, u8 *lqi)
{
	u8 rx_len;
	u8 addr[2];
	u8 lqi_rssi[2];
	u16 cmd;
	int ret;
	struct spi_message msg;
	struct spi_transfer addr_xfer = {
		.len = 2,
		.tx_buf = &addr,
	};
	struct spi_transfer data_xfer = {
		.len = 0x0, /* set below */
		.rx_buf = data,
	};
	struct spi_transfer status_xfer = {
		.len = 2,
		.rx_buf = &lqi_rssi,
	};

	/* Get the length of the data in the RX FIFO. The length in this
	 * register excludes the 1-byte length field at the beginning. */
	ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
	if (ret)
		goto out;

	/* Range check the RX FIFO length, accounting for the one-byte
	 * length field at the beginning. */
	if (rx_len > RX_FIFO_SIZE-1) {
		dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
		rx_len = RX_FIFO_SIZE-1;
	}

	if (rx_len > *len) {
		/* Passed in buffer wasn't big enough. Should never happen. */
		dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
		rx_len = *len;
	}

	/* Set up the commands to read the data. */
	cmd = MRF24J40_READLONG(REG_RX_FIFO+1); /* +1 skips the length byte */
	addr[0] = cmd >> 8 & 0xff;
	addr[1] = cmd & 0xff;
	data_xfer.len = rx_len;

	spi_message_init(&msg);
	spi_message_add_tail(&addr_xfer, &msg);
	spi_message_add_tail(&data_xfer, &msg);
	spi_message_add_tail(&status_xfer, &msg);

	ret = spi_sync(devrec->spi, &msg);
	if (ret) {
		dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
		goto out;
	}

	*lqi = lqi_rssi[0];
	*len = rx_len;

#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
		DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
	printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
		lqi_rssi[0], lqi_rssi[1]);
#endif

out:
	return ret;
}
+
+static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret = 0;
+
+       dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
+
+       ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
+       if (ret)
+               goto err;
+
+       /* Set TXNTRIG bit of TXNCON to send packet */
+       ret = read_short_reg(devrec, REG_TXNCON, &val);
+       if (ret)
+               goto err;
+       val |= 0x1;
+       val &= ~0x4;
+       write_short_reg(devrec, REG_TXNCON, val);
+
+       INIT_COMPLETION(devrec->tx_complete);
+
+       /* Wait for the device to send the TX complete interrupt. */
+       ret = wait_for_completion_interruptible_timeout(
+                                               &devrec->tx_complete,
+                                               5 * HZ);
+       if (ret == -ERESTARTSYS)
+               goto err;
+       if (ret == 0) {
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       /* Check for send error from the device. */
+       ret = read_short_reg(devrec, REG_TXSTAT, &val);
+       if (ret)
+               goto err;
+       if (val & 0x1) {
+               dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n");
+               ret = -ECOMM; /* TODO: Better error code ? */
+       } else
+               dev_dbg(printdev(devrec), "Packet Sent\n");
+
+err:
+
+       return ret;
+}
+
+static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       /* TODO: */
+       printk(KERN_WARNING "mrf24j40: ed not implemented\n");
+       *level = 0;
+       return 0;
+}
+
+static int mrf24j40_start(struct ieee802154_dev *dev)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret;
+
+       dev_dbg(printdev(devrec), "start\n");
+
+       ret = read_short_reg(devrec, REG_INTCON, &val);
+       if (ret)
+               return ret;
+       val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
+       write_short_reg(devrec, REG_INTCON, val);
+
+       return 0;
+}
+
+static void mrf24j40_stop(struct ieee802154_dev *dev)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret;
+       dev_dbg(printdev(devrec), "stop\n");
+
+       ret = read_short_reg(devrec, REG_INTCON, &val);
+       if (ret)
+               return;
+       val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
+       write_short_reg(devrec, REG_INTCON, val);
+
+       return;
+}
+
+static int mrf24j40_set_channel(struct ieee802154_dev *dev,
+                               int page, int channel)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret;
+
+       dev_dbg(printdev(devrec), "Set Channel %d\n", channel);
+
+       WARN_ON(page != 0);
+       WARN_ON(channel < MRF24J40_CHAN_MIN);
+       WARN_ON(channel > MRF24J40_CHAN_MAX);
+
+       /* Set Channel TODO */
+       val = (channel-11) << 4 | 0x03;
+       write_long_reg(devrec, REG_RFCON0, val);
+
+       /* RF Reset */
+       ret = read_short_reg(devrec, REG_RFCTL, &val);
+       if (ret)
+               return ret;
+       val |= 0x04;
+       write_short_reg(devrec, REG_RFCTL, val);
+       val &= ~0x04;
+       write_short_reg(devrec, REG_RFCTL, val);
+
+       udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+
+       return 0;
+}
+
/* Apply hardware address-filter changes requested by the stack: short
 * address, 64-bit IEEE address, PAN ID, and PAN-coordinator role.
 * Only the bits flagged in 'changed' are touched. Register write results
 * are not checked (best-effort), except the RXMCR read-modify-write. */
static int mrf24j40_filter(struct ieee802154_dev *dev,
			   struct ieee802154_hw_addr_filt *filt,
			   unsigned long changed)
{
	struct mrf24j40 *devrec = dev->priv;

	dev_dbg(printdev(devrec), "filter\n");

	if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
		/* Short Addr */
		u8 addrh, addrl;
		addrh = filt->short_addr >> 8 & 0xff;
		addrl = filt->short_addr & 0xff;

		write_short_reg(devrec, REG_SADRH, addrh);
		write_short_reg(devrec, REG_SADRL, addrl);
		dev_dbg(printdev(devrec),
			"Set short addr to %04hx\n", filt->short_addr);
	}

	if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
		/* Device Address: one byte per EADR register. */
		int i;
		for (i = 0; i < 8; i++)
			write_short_reg(devrec, REG_EADR0+i,
					filt->ieee_addr[i]);

#ifdef DEBUG
		printk(KERN_DEBUG "Set long addr to: ");
		for (i = 0; i < 8; i++)
			printk("%02hhx ", filt->ieee_addr[i]);
		printk(KERN_DEBUG "\n");
#endif
	}

	if (changed & IEEE802515_AFILT_PANID_CHANGED) {
		/* PAN ID */
		u8 panidl, panidh;
		panidh = filt->pan_id >> 8 & 0xff;
		panidl = filt->pan_id & 0xff;
		write_short_reg(devrec, REG_PANIDH, panidh);
		write_short_reg(devrec, REG_PANIDL, panidl);

		dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
	}

	if (changed & IEEE802515_AFILT_PANC_CHANGED) {
		/* Pan Coordinator: bit 3 of RXMCR (read-modify-write). */
		u8 val;
		int ret;

		ret = read_short_reg(devrec, REG_RXMCR, &val);
		if (ret)
			return ret;
		if (filt->pan_coord)
			val |= 0x8;
		else
			val &= ~0x8;
		write_short_reg(devrec, REG_RXMCR, val);

		/* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
		 * REG_ORDER is maintained as default (no beacon/superframe).
		 */

		dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
					filt->pan_coord ? "on" : "off");
	}

	return 0;
}
+
+static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+{
+       u8 len = RX_FIFO_SIZE;
+       u8 lqi = 0;
+       u8 val;
+       int ret = 0;
+       struct sk_buff *skb;
+
+       /* Turn off reception of packets off the air. This prevents the
+        * device from overwriting the buffer while we're reading it. */
+       ret = read_short_reg(devrec, REG_BBREG1, &val);
+       if (ret)
+               goto out;
+       val |= 4; /* SET RXDECINV */
+       write_short_reg(devrec, REG_BBREG1, val);
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
+       if (ret < 0) {
+               dev_err(printdev(devrec), "Failure reading RX FIFO\n");
+               kfree_skb(skb);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Cut off the checksum */
+       skb_trim(skb, len-2);
+
+       /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
+        * also from a workqueue).  I think irqsafe is not necessary here.
+        * Can someone confirm? */
+       ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
+
+       dev_dbg(printdev(devrec), "RX Handled\n");
+
+out:
+       /* Turn back on reception of packets off the air. */
+       ret = read_short_reg(devrec, REG_BBREG1, &val);
+       if (ret)
+               return ret;
+       val &= ~0x4; /* Clear RXDECINV */
+       write_short_reg(devrec, REG_BBREG1, val);
+
+       return ret;
+}
+
/* Driver callbacks exported to the ieee802154 stack. */
static struct ieee802154_ops mrf24j40_ops = {
	.owner = THIS_MODULE,
	.xmit = mrf24j40_tx,
	.ed = mrf24j40_ed,
	.start = mrf24j40_start,
	.stop = mrf24j40_stop,
	.set_channel = mrf24j40_set_channel,
	.set_hw_addr_filt = mrf24j40_filter,
};
+
+static irqreturn_t mrf24j40_isr(int irq, void *data)
+{
+       struct mrf24j40 *devrec = data;
+
+       disable_irq_nosync(irq);
+
+       schedule_work(&devrec->irqwork);
+
+       return IRQ_HANDLED;
+}
+
+static void mrf24j40_isrwork(struct work_struct *work)
+{
+       struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
+       u8 intstat;
+       int ret;
+
+       /* Read the interrupt status */
+       ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
+       if (ret)
+               goto out;
+
+       /* Check for TX complete */
+       if (intstat & 0x1)
+               complete(&devrec->tx_complete);
+
+       /* Check for Rx */
+       if (intstat & 0x8)
+               mrf24j40_handle_rx(devrec);
+
+out:
+       enable_irq(devrec->spi->irq);
+}
+
/* Probe: allocate driver state, register with the 802.15.4 stack,
 * initialize the chip (datasheet section 3.2 sequence), and hook up the
 * interrupt. Unwinds in reverse order on any failure. */
static int __devinit mrf24j40_probe(struct spi_device *spi)
{
	int ret = -ENOMEM;
	u8 val;
	struct mrf24j40 *devrec;

	printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);

	devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
	if (!devrec)
		goto err_devrec;
	/* 3 bytes: the largest transfer through this scratch buffer is a
	 * 2-byte long-register command plus one data byte. */
	devrec->buf = kzalloc(3, GFP_KERNEL);
	if (!devrec->buf)
		goto err_buf;

	spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
	if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
		spi->max_speed_hz = MAX_SPI_SPEED_HZ;

	mutex_init(&devrec->buffer_mutex);
	init_completion(&devrec->tx_complete);
	INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
	devrec->spi = spi;
	dev_set_drvdata(&spi->dev, devrec);

	/* Register with the 802154 subsystem */

	devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
	if (!devrec->dev)
		goto err_alloc_dev;

	devrec->dev->priv = devrec;
	devrec->dev->parent = &devrec->spi->dev;
	devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
	devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;

	dev_dbg(printdev(devrec), "registered mrf24j40\n");
	ret = ieee802154_register_device(devrec->dev);
	if (ret)
		goto err_register_device;

	/* Initialize the device.
		From datasheet section 3.2: Initialization.
	   Write results here are unchecked; the RXMCR read below will catch
	   a dead SPI link. */
	write_short_reg(devrec, REG_SOFTRST, 0x07);
	write_short_reg(devrec, REG_PACON2, 0x98);
	write_short_reg(devrec, REG_TXSTBL, 0x95);
	write_long_reg(devrec, REG_RFCON0, 0x03);
	write_long_reg(devrec, REG_RFCON1, 0x01);
	write_long_reg(devrec, REG_RFCON2, 0x80);
	write_long_reg(devrec, REG_RFCON6, 0x90);
	write_long_reg(devrec, REG_RFCON7, 0x80);
	write_long_reg(devrec, REG_RFCON8, 0x10);
	write_long_reg(devrec, REG_SLPCON1, 0x21);
	write_short_reg(devrec, REG_BBREG2, 0x80);
	write_short_reg(devrec, REG_CCAEDTH, 0x60);
	write_short_reg(devrec, REG_BBREG6, 0x40);
	write_short_reg(devrec, REG_RFCTL, 0x04);
	write_short_reg(devrec, REG_RFCTL, 0x0);
	udelay(192); /* settle time after RF state machine reset */

	/* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
	ret = read_short_reg(devrec, REG_RXMCR, &val);
	if (ret)
		goto err_read_reg;
	val &= ~0x3; /* Clear RX mode (normal) */
	write_short_reg(devrec, REG_RXMCR, val);

	ret = request_irq(spi->irq,
			  mrf24j40_isr,
			  IRQF_TRIGGER_FALLING,
			  dev_name(&spi->dev),
			  devrec);

	if (ret) {
		dev_err(printdev(devrec), "Unable to get IRQ");
		goto err_irq;
	}

	return 0;

err_irq:
err_read_reg:
	ieee802154_unregister_device(devrec->dev);
err_register_device:
	ieee802154_free_device(devrec->dev);
err_alloc_dev:
	kfree(devrec->buf);
err_buf:
	kfree(devrec);
err_devrec:
	return ret;
}
+
/* Remove: tear down in reverse order of probe -- free the IRQ first so no
 * new work can be scheduled, flush any queued work, unregister from the
 * 802.15.4 stack, then release the driver's own allocations. */
static int __devexit mrf24j40_remove(struct spi_device *spi)
{
	struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);

	dev_dbg(printdev(devrec), "remove\n");

	free_irq(spi->irq, devrec);
	flush_work_sync(&devrec->irqwork); /* TODO: Is this the right call? */
	ieee802154_unregister_device(devrec->dev);
	ieee802154_free_device(devrec->dev);
	/* TODO: Will ieee802154_free_device() wait until ->xmit() is
	 * complete? */

	/* Clean up the SPI stuff. */
	dev_set_drvdata(&spi->dev, NULL);
	kfree(devrec->buf);
	kfree(devrec);
	return 0;
}
+
/* Supported SPI device names: the bare chip and the MA module variant. */
static const struct spi_device_id mrf24j40_ids[] = {
	{ "mrf24j40", 0 },
	{ "mrf24j40ma", 0 },
	{ },
};
MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
+
+static struct spi_driver mrf24j40_driver = {
+       .driver = {
+               .name = "mrf24j40",
+               .bus = &spi_bus_type,
+               .owner = THIS_MODULE,
+       },
+       .id_table = mrf24j40_ids,
+       .probe = mrf24j40_probe,
+       .remove = __devexit_p(mrf24j40_remove),
+};
+
/* The hand-written init/exit functions only registered and unregistered
 * the driver; module_spi_driver() generates exactly that boilerplate. */
module_spi_driver(mrf24j40_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alan Ott");
+MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver");
index e2a06fd996d51409ed711fb360105d7f423f27a8..81f8f9e31db510892acae3cdb39a799de7e21be9 100644 (file)
@@ -157,7 +157,7 @@ static const struct net_device_ops loopback_ops = {
  */
 static void loopback_setup(struct net_device *dev)
 {
-       dev->mtu                = (16 * 1024) + 20 + 20 + 12;
+       dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
        dev->tx_queue_len       = 0;
@@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net)
        if (err)
                goto out_free_netdev;
 
+       BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
        net->loopback_dev = dev;
        return 0;
 
index 66a9bfe7b1c87f40c8da94c34b3245e1ff3fcc0c..68a43fe602e7a89ccffa54d1a5b82e2da4e5330c 100644 (file)
@@ -546,9 +546,9 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
        return 0;
 }
 
-static int macvlan_fdb_add(struct ndmsg *ndm,
+static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
-                          unsigned char *addr,
+                          const unsigned char *addr,
                           u16 flags)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,7 +567,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm,
 
 static int macvlan_fdb_del(struct ndmsg *ndm,
                           struct net_device *dev,
-                          unsigned char *addr)
+                          const unsigned char *addr)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
index 3090dc65a6f131541d055a17cb96e7c8c3e9d06a..983bbf4d5ef6a7437118ac2d21ba2cb66599bc8c 100644 (file)
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
          several child MDIO busses to a parent bus.  Child bus
          selection is under the control of GPIO lines.
 
+config MDIO_BUS_MUX_MMIOREG
+       tristate "Support for MMIO device-controlled MDIO bus multiplexers"
+       depends on OF_MDIO
+       select MDIO_BUS_MUX
+       help
+         This module provides a driver for MDIO bus multiplexers that
+         are controlled via a simple memory-mapped device, like an FPGA.
+         The multiplexer connects one of several child MDIO busses to a
+         parent bus.  Child bus selection is under the control of one of
+         the FPGA's registers.
+
+         Currently, only 8-bit registers are supported.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
index 6d2dc6c94f2e4bc30c41729b9c9d2cdbfe4de120..426674debae44dfb9967e305ad571f7012010afb 100644 (file)
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
 obj-$(CONFIG_AMD_PHY)          += amd.o
 obj-$(CONFIG_MDIO_BUS_MUX)     += mdio-mux.o
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)        += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
index b0da0226661f76b1c9af768cc1eeb6f21d5e1b61..24e05c43bff872e34e35e1deb4d385884057ae4a 100644 (file)
@@ -980,7 +980,7 @@ static int dp83640_probe(struct phy_device *phydev)
 
        if (choose_this_phy(clock, phydev)) {
                clock->chosen = dp83640;
-               clock->ptp_clock = ptp_clock_register(&clock->caps);
+               clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
                if (IS_ERR(clock->ptp_clock)) {
                        err = PTR_ERR(clock->ptp_clock);
                        goto no_register;
index 6d1e3fcc43e237b8076c2be6a2b5c41db5473d38..ec40ba882f612dba77a72b0a4a9e5689c7898ce5 100644 (file)
@@ -122,6 +122,123 @@ static int lxt971_config_intr(struct phy_device *phydev)
        return err;
 }
 
+/*
+ * A2 version of LXT973 chip has an ERRATA: it randomly return the contents
+ * of the previous even register when you read a odd register regularly
+ */
+
/* Update phydev->link from BMSR while working around the A2 erratum
 * described above: after a throwaway BMSR read, BMCR is read and BMSR is
 * re-read up to 8 times until it no longer matches the BMCR value, so a
 * stale even-register echo is not mistaken for link status.
 * Returns 0 or a negative phy_read() error. */
static int lxt973a2_update_link(struct phy_device *phydev)
{
	int status;
	int control;
	int retry = 8; /* we try 8 times */

	/* Do a fake read */
	status = phy_read(phydev, MII_BMSR);

	if (status < 0)
		return status;

	control = phy_read(phydev, MII_BMCR);
	if (control < 0)
		return control;

	do {
		/* Read link and autonegotiation status */
		status = phy_read(phydev, MII_BMSR);
	} while (status >= 0 && retry-- && status == control);

	if (status < 0)
		return status;

	if ((status & BMSR_LSTATUS) == 0)
		phydev->link = 0;
	else
		phydev->link = 1;

	return 0;
}
+
+int lxt973a2_read_status(struct phy_device *phydev)
+{
+       int adv;
+       int err;
+       int lpa;
+       int lpagb = 0;
+
+       /* Update the link, but return if there was an error */
+       err = lxt973a2_update_link(phydev);
+       if (err)
+               return err;
+
+       if (AUTONEG_ENABLE == phydev->autoneg) {
+               int retry = 1;
+
+               adv = phy_read(phydev, MII_ADVERTISE);
+
+               if (adv < 0)
+                       return adv;
+
+               do {
+                       lpa = phy_read(phydev, MII_LPA);
+
+                       if (lpa < 0)
+                               return lpa;
+
+                       /* If both registers are equal, it is suspect but not
+                       * impossible, hence a new try
+                       */
+               } while (lpa == adv && retry--);
+
+               lpa &= adv;
+
+               phydev->speed = SPEED_10;
+               phydev->duplex = DUPLEX_HALF;
+               phydev->pause = phydev->asym_pause = 0;
+
+               if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+                       phydev->speed = SPEED_1000;
+
+                       if (lpagb & LPA_1000FULL)
+                               phydev->duplex = DUPLEX_FULL;
+               } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+                       phydev->speed = SPEED_100;
+
+                       if (lpa & LPA_100FULL)
+                               phydev->duplex = DUPLEX_FULL;
+               } else {
+                       if (lpa & LPA_10FULL)
+                               phydev->duplex = DUPLEX_FULL;
+               }
+
+               if (phydev->duplex == DUPLEX_FULL) {
+                       phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+                       phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+               }
+       } else {
+               int bmcr = phy_read(phydev, MII_BMCR);
+
+               if (bmcr < 0)
+                       return bmcr;
+
+               if (bmcr & BMCR_FULLDPLX)
+                       phydev->duplex = DUPLEX_FULL;
+               else
+                       phydev->duplex = DUPLEX_HALF;
+
+               if (bmcr & BMCR_SPEED1000)
+                       phydev->speed = SPEED_1000;
+               else if (bmcr & BMCR_SPEED100)
+                       phydev->speed = SPEED_100;
+               else
+                       phydev->speed = SPEED_10;
+
+               phydev->pause = phydev->asym_pause = 0;
+       }
+
+       return 0;
+}
+
 static int lxt973_probe(struct phy_device *phydev)
 {
        int val = phy_read(phydev, MII_LXT973_PCR);
@@ -173,6 +290,16 @@ static struct phy_driver lxt97x_driver[] = {
        .ack_interrupt  = lxt971_ack_interrupt,
        .config_intr    = lxt971_config_intr,
        .driver         = { .owner = THIS_MODULE,},
+}, {
+       .phy_id         = 0x00137a10,
+       .name           = "LXT973-A2",
+       .phy_id_mask    = 0xffffffff,
+       .features       = PHY_BASIC_FEATURES,
+       .flags          = 0,
+       .probe          = lxt973_probe,
+       .config_aneg    = lxt973_config_aneg,
+       .read_status    = lxt973a2_read_status,
+       .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = 0x00137a10,
        .name           = "LXT973",
index 7189adf54bd18eb376cd5fc83745c4c4c0ec130b..899274f2f9b1dd1da0aac0442807b6e42b0cddc2 100644 (file)
 #include <linux/gpio.h>
 #include <linux/mdio-gpio.h>
 
-#ifdef CONFIG_OF_GPIO
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#endif
 
 struct mdio_gpio_info {
        struct mdiobb_ctrl ctrl;
        int mdc, mdio;
 };
 
/* Build mdio_gpio_platform_data from the device tree: GPIO index 0 is the
 * MDC line, index 1 is MDIO. Returns NULL on any failure. The devm
 * allocation is released automatically with the device, so the early
 * NULL returns do not leak. */
static void *mdio_gpio_of_get_data(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mdio_gpio_platform_data *pdata;
	int ret;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	ret = of_get_gpio(np, 0);
	if (ret < 0)
		return NULL;

	pdata->mdc = ret;

	ret = of_get_gpio(np, 1);
	if (ret < 0)
		return NULL;
	pdata->mdio = ret;

	return pdata;
}
+
 static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
 {
        struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
 
 static int __devinit mdio_gpio_probe(struct platform_device *pdev)
 {
-       struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data;
+       struct mdio_gpio_platform_data *pdata;
        struct mii_bus *new_bus;
        int ret;
 
+       if (pdev->dev.of_node)
+               pdata = mdio_gpio_of_get_data(pdev);
+       else
+               pdata = pdev->dev.platform_data;
+
        if (!pdata)
                return -ENODEV;
 
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
        if (!new_bus)
                return -ENODEV;
 
-       ret = mdiobus_register(new_bus);
+       if (pdev->dev.of_node)
+               ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+       else
+               ret = mdiobus_register(new_bus);
+
        if (ret)
                mdio_gpio_bus_deinit(&pdev->dev);
 
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF_GPIO
-
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
-{
-       struct mdio_gpio_platform_data *pdata;
-       struct mii_bus *new_bus;
-       int ret;
-
-       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return -ENOMEM;
-
-       ret = of_get_gpio(ofdev->dev.of_node, 0);
-       if (ret < 0)
-               goto out_free;
-       pdata->mdc = ret;
-
-       ret = of_get_gpio(ofdev->dev.of_node, 1);
-       if (ret < 0)
-               goto out_free;
-       pdata->mdio = ret;
-
-       new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
-       if (!new_bus)
-               goto out_free;
-
-       ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
-       if (ret)
-               mdio_gpio_bus_deinit(&ofdev->dev);
-
-       return ret;
-
-out_free:
-       kfree(pdata);
-       return -ENODEV;
-}
-
-static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
-{
-       mdio_gpio_bus_destroy(&ofdev->dev);
-       kfree(ofdev->dev.platform_data);
-
-       return 0;
-}
-
-static struct of_device_id mdio_ofgpio_match[] = {
-       {
-               .compatible = "virtual,mdio-gpio",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
-
-static struct platform_driver mdio_ofgpio_driver = {
-       .driver = {
-               .name = "mdio-ofgpio",
-               .owner = THIS_MODULE,
-               .of_match_table = mdio_ofgpio_match,
-       },
-       .probe = mdio_ofgpio_probe,
-       .remove = __devexit_p(mdio_ofgpio_remove),
+static struct of_device_id mdio_gpio_of_match[] = {
+       { .compatible = "virtual,mdio-gpio", },
+       { /* sentinel */ }
 };
 
-static inline int __init mdio_ofgpio_init(void)
-{
-       return platform_driver_register(&mdio_ofgpio_driver);
-}
-
-static inline void mdio_ofgpio_exit(void)
-{
-       platform_driver_unregister(&mdio_ofgpio_driver);
-}
-#else
-static inline int __init mdio_ofgpio_init(void) { return 0; }
-static inline void mdio_ofgpio_exit(void) { }
-#endif /* CONFIG_OF_GPIO */
-
 static struct platform_driver mdio_gpio_driver = {
        .probe = mdio_gpio_probe,
        .remove = __devexit_p(mdio_gpio_remove),
        .driver         = {
                .name   = "mdio-gpio",
                .owner  = THIS_MODULE,
+               .of_match_table = mdio_gpio_of_match,
        },
 };
 
 static int __init mdio_gpio_init(void)
 {
-       int ret;
-
-       ret = mdio_ofgpio_init();
-       if (ret)
-               return ret;
-
-       ret = platform_driver_register(&mdio_gpio_driver);
-       if (ret)
-               mdio_ofgpio_exit();
-
-       return ret;
+       return platform_driver_register(&mdio_gpio_driver);
 }
 module_init(mdio_gpio_init);
 
 static void __exit mdio_gpio_exit(void)
 {
        platform_driver_unregister(&mdio_gpio_driver);
-       mdio_ofgpio_exit();
 }
 module_exit(mdio_gpio_exit);
 
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644 (file)
index 0000000..9061ba6
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Simple memory-mapped device MDIO MUX driver
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+
+struct mdio_mux_mmioreg_state {
+       void *mux_handle;
+       phys_addr_t phys;
+       uint8_t mask;
+};
+
+/*
+ * MDIO multiplexing switch function
+ *
+ * This function is called by the mdio-mux layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ *
+ * 'current_child' is the current value of the mux register (masked via
+ * s->mask).
+ *
+ * 'desired_child' is the value of the 'reg' property of the target child MDIO
+ * node.
+ *
+ * The first time this function is called, current_child == -1.
+ *
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
+                                     void *data)
+{
+       struct mdio_mux_mmioreg_state *s = data;
+
+       if (current_child ^ desired_child) {
+               void *p = ioremap(s->phys, 1);
+               uint8_t x, y;
+
+               if (!p)
+                       return -ENOMEM;
+
+               x = ioread8(p);
+               y = (x & ~s->mask) | desired_child;
+               if (x != y) {
+                       iowrite8((x & ~s->mask) | desired_child, p);
+                       pr_debug("%s: %02x -> %02x\n", __func__, x, y);
+               }
+
+               iounmap(p);
+       }
+
+       return 0;
+}
+
+static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
+{
+       struct device_node *np2, *np = pdev->dev.of_node;
+       struct mdio_mux_mmioreg_state *s;
+       struct resource res;
+       const __be32 *iprop;
+       int len, ret;
+
+       dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
+
+       s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
+                       np->full_name);
+               return ret;
+       }
+       s->phys = res.start;
+
+       if (resource_size(&res) != sizeof(uint8_t)) {
+               dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+               return -EINVAL;
+       }
+
+       iprop = of_get_property(np, "mux-mask", &len);
+       if (!iprop || len != sizeof(uint32_t)) {
+               dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
+               return -ENODEV;
+       }
+       if (be32_to_cpup(iprop) > 255) {
+               dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+               return -EINVAL;
+       }
+       s->mask = be32_to_cpup(iprop);
+
+       /*
+        * Verify that the 'reg' property of each child MDIO bus does not
+        * set any bits outside of the 'mask'.
+        */
+       for_each_available_child_of_node(np, np2) {
+               iprop = of_get_property(np2, "reg", &len);
+               if (!iprop || len != sizeof(uint32_t)) {
+                       dev_err(&pdev->dev, "mdio-mux child node %s is "
+                               "missing a 'reg' property\n", np2->full_name);
+                       return -ENODEV;
+               }
+               if (be32_to_cpup(iprop) & ~s->mask) {
+                       dev_err(&pdev->dev, "mdio-mux child node %s has "
+                               "a 'reg' value with unmasked bits\n",
+                               np2->full_name);
+                       return -ENODEV;
+               }
+       }
+
+       ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
+                           &s->mux_handle, s);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
+                       np->full_name);
+               return ret;
+       }
+
+       pdev->dev.platform_data = s;
+
+       return 0;
+}
+
+static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
+{
+       struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
+
+       mdio_mux_uninit(s->mux_handle);
+
+       return 0;
+}
+
+static struct of_device_id mdio_mux_mmioreg_match[] = {
+       {
+               .compatible = "mdio-mux-mmioreg",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
+
+static struct platform_driver mdio_mux_mmioreg_driver = {
+       .driver = {
+               .name           = "mdio-mux-mmioreg",
+               .owner          = THIS_MODULE,
+               .of_match_table = mdio_mux_mmioreg_match,
+       },
+       .probe          = mdio_mux_mmioreg_probe,
+       .remove         = __devexit_p(mdio_mux_mmioreg_remove),
+};
+
+module_platform_driver(mdio_mux_mmioreg_driver);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
+MODULE_LICENSE("GPL v2");
index 7ca2ff97c368d6027b5c1a2cb1f94566741616b9..ef9ea924822349217f536ad41d124a791aecd2fb 100644 (file)
@@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
        bus->write(bus, addr, MII_MMD_DATA, data);
 }
 
-static u32 phy_eee_to_adv(u16 eee_adv)
-{
-       u32 adv = 0;
-
-       if (eee_adv & MDIO_EEE_100TX)
-               adv |= ADVERTISED_100baseT_Full;
-       if (eee_adv & MDIO_EEE_1000T)
-               adv |= ADVERTISED_1000baseT_Full;
-       if (eee_adv & MDIO_EEE_10GT)
-               adv |= ADVERTISED_10000baseT_Full;
-       if (eee_adv & MDIO_EEE_1000KX)
-               adv |= ADVERTISED_1000baseKX_Full;
-       if (eee_adv & MDIO_EEE_10GKX4)
-               adv |= ADVERTISED_10000baseKX4_Full;
-       if (eee_adv & MDIO_EEE_10GKR)
-               adv |= ADVERTISED_10000baseKR_Full;
-
-       return adv;
-}
-
-static u32 phy_eee_to_supported(u16 eee_caported)
-{
-       u32 supported = 0;
-
-       if (eee_caported & MDIO_EEE_100TX)
-               supported |= SUPPORTED_100baseT_Full;
-       if (eee_caported & MDIO_EEE_1000T)
-               supported |= SUPPORTED_1000baseT_Full;
-       if (eee_caported & MDIO_EEE_10GT)
-               supported |= SUPPORTED_10000baseT_Full;
-       if (eee_caported & MDIO_EEE_1000KX)
-               supported |= SUPPORTED_1000baseKX_Full;
-       if (eee_caported & MDIO_EEE_10GKX4)
-               supported |= SUPPORTED_10000baseKX4_Full;
-       if (eee_caported & MDIO_EEE_10GKR)
-               supported |= SUPPORTED_10000baseKR_Full;
-
-       return supported;
-}
-
-static u16 phy_adv_to_eee(u32 adv)
-{
-       u16 reg = 0;
-
-       if (adv & ADVERTISED_100baseT_Full)
-               reg |= MDIO_EEE_100TX;
-       if (adv & ADVERTISED_1000baseT_Full)
-               reg |= MDIO_EEE_1000T;
-       if (adv & ADVERTISED_10000baseT_Full)
-               reg |= MDIO_EEE_10GT;
-       if (adv & ADVERTISED_1000baseKX_Full)
-               reg |= MDIO_EEE_1000KX;
-       if (adv & ADVERTISED_10000baseKX4_Full)
-               reg |= MDIO_EEE_10GKX4;
-       if (adv & ADVERTISED_10000baseKR_Full)
-               reg |= MDIO_EEE_10GKR;
-
-       return reg;
-}
-
 /**
  * phy_init_eee - init and check the EEE feature
  * @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                if (eee_cap < 0)
                        return eee_cap;
 
-               cap = phy_eee_to_supported(eee_cap);
+               cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
                        goto eee_exit;
 
@@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                if (eee_adv < 0)
                        return eee_adv;
 
-               adv = phy_eee_to_adv(eee_adv);
-               lp = phy_eee_to_adv(eee_lp);
+               adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+               lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if ((lp & adv & settings[idx].setting))
                        goto eee_exit;
@@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
                                    MDIO_MMD_PCS, phydev->addr);
        if (val < 0)
                return val;
-       data->supported = phy_eee_to_supported(val);
+       data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
        /* Get advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
-       data->advertised = phy_eee_to_adv(val);
+       data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        /* Get LP advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
-       data->lp_advertised = phy_eee_to_adv(val);
+       data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        return 0;
 }
@@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
        int val;
 
-       val = phy_adv_to_eee(data->advertised);
+       val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
        phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
                               phydev->addr, val);
 
index 5c0557222f20b26a10fa450146098b699ff4e98e..eb3f5cefeba3c6ddcbd53a44cb63a759d9b803ef 100644 (file)
@@ -93,6 +93,18 @@ struct ppp_file {
 #define PF_TO_PPP(pf)          PF_TO_X(pf, struct ppp)
 #define PF_TO_CHANNEL(pf)      PF_TO_X(pf, struct channel)
 
+/*
+ * Data structure to hold primary network stats for which
+ * we want to use 64 bit storage.  Other network stats
+ * are stored in dev->stats of the ppp structure.
+ */
+struct ppp_link_stats {
+       u64 rx_packets;
+       u64 tx_packets;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
 /*
  * Data structure describing one ppp unit.
  * A ppp unit corresponds to a ppp network interface device
@@ -136,6 +148,7 @@ struct ppp {
        unsigned pass_len, active_len;
 #endif /* CONFIG_PPP_FILTER */
        struct net      *ppp_net;       /* the net we belong to */
+       struct ppp_link_stats stats64;  /* 64 bit network stats */
 };
 
 /*
@@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return err;
 }
 
+struct rtnl_link_stats64*
+ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+{
+       struct ppp *ppp = netdev_priv(dev);
+
+       ppp_recv_lock(ppp);
+       stats64->rx_packets = ppp->stats64.rx_packets;
+       stats64->rx_bytes   = ppp->stats64.rx_bytes;
+       ppp_recv_unlock(ppp);
+
+       ppp_xmit_lock(ppp);
+       stats64->tx_packets = ppp->stats64.tx_packets;
+       stats64->tx_bytes   = ppp->stats64.tx_bytes;
+       ppp_xmit_unlock(ppp);
+
+       stats64->rx_errors        = dev->stats.rx_errors;
+       stats64->tx_errors        = dev->stats.tx_errors;
+       stats64->rx_dropped       = dev->stats.rx_dropped;
+       stats64->tx_dropped       = dev->stats.tx_dropped;
+       stats64->rx_length_errors = dev->stats.rx_length_errors;
+
+       return stats64;
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
-       .ndo_start_xmit = ppp_start_xmit,
-       .ndo_do_ioctl   = ppp_net_ioctl,
+       .ndo_start_xmit  = ppp_start_xmit,
+       .ndo_do_ioctl    = ppp_net_ioctl,
+       .ndo_get_stats64 = ppp_get_stats64,
 };
 
 static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 #endif /* CONFIG_PPP_FILTER */
        }
 
-       ++ppp->dev->stats.tx_packets;
-       ppp->dev->stats.tx_bytes += skb->len - 2;
+       ++ppp->stats64.tx_packets;
+       ppp->stats64.tx_bytes += skb->len - 2;
 
        switch (proto) {
        case PPP_IP:
@@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                break;
        }
 
-       ++ppp->dev->stats.rx_packets;
-       ppp->dev->stats.rx_bytes += skb->len - 2;
+       ++ppp->stats64.rx_packets;
+       ppp->stats64.rx_bytes += skb->len - 2;
 
        npi = proto_to_npindex(proto);
        if (npi < 0) {
@@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
        struct slcompress *vj = ppp->vj;
 
        memset(st, 0, sizeof(*st));
-       st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
+       st->p.ppp_ipackets = ppp->stats64.rx_packets;
        st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
-       st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
-       st->p.ppp_opackets = ppp->dev->stats.tx_packets;
+       st->p.ppp_ibytes = ppp->stats64.rx_bytes;
+       st->p.ppp_opackets = ppp->stats64.tx_packets;
        st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
-       st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+       st->p.ppp_obytes = ppp->stats64.tx_bytes;
        if (!vj)
                return;
        st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
index 6a7260b03a1e0a91c2186a913bf134b45e6d1047..6b08bd419fba912b239e5f6ad2c9151922e6649a 100644 (file)
@@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST
        ---help---
          Basic mode where packets are transmitted always by all suitable ports.
 
-         All added ports are setup to have team's mac address.
+         All added ports are setup to have team's device address.
 
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_broadcast.
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
          Basic mode where port used for transmitting packets is selected in
          round-robin fashion using packet counter.
 
-         All added ports are setup to have team's mac address.
+         All added ports are setup to have team's device address.
 
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_roundrobin.
index f8cd61f449a4772da5a50a446cd07fc80d5928c8..5c7547c4f802550426c8f9769f9d9ba9a93becf9 100644 (file)
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 }
 
 /*
- * Since the ability to change mac address for open port device is tested in
+ * Since the ability to change device address for open port device is tested in
  * team_port_add, this function can be called without control of return value
  */
-static int __set_port_mac(struct net_device *port_dev,
-                         const unsigned char *dev_addr)
+static int __set_port_dev_addr(struct net_device *port_dev,
+                              const unsigned char *dev_addr)
 {
        struct sockaddr addr;
 
-       memcpy(addr.sa_data, dev_addr, ETH_ALEN);
-       addr.sa_family = ARPHRD_ETHER;
+       memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
+       addr.sa_family = port_dev->type;
        return dev_set_mac_address(port_dev, &addr);
 }
 
-static int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_dev_addr(struct team_port *port)
 {
-       return __set_port_mac(port->dev, port->orig.dev_addr);
+       return __set_port_dev_addr(port->dev, port->orig.dev_addr);
 }
 
-int team_port_set_team_mac(struct team_port *port)
+int team_port_set_team_dev_addr(struct team_port *port)
 {
-       return __set_port_mac(port->dev, port->team->dev->dev_addr);
+       return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
 }
-EXPORT_SYMBOL(team_port_set_team_mac);
+EXPORT_SYMBOL(team_port_set_team_dev_addr);
 
 static void team_refresh_port_linkup(struct team_port *port)
 {
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
 }
 
 
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+       struct list_head *listarr;
+       unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+       unsigned int i;
+
+       if (!queue_cnt)
+               return 0;
+       listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+       if (!listarr)
+               return -ENOMEM;
+       team->qom_lists = listarr;
+       for (i = 0; i < queue_cnt; i++)
+               INIT_LIST_HEAD(listarr++);
+       return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+       kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+       return &team->qom_lists[queue_id - 1];
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct list_head *qom_list;
+       struct team_port *port;
+
+       if (!team->queue_override_enabled || !skb->queue_mapping)
+               return false;
+       qom_list = __team_get_qom_list(team, skb->queue_mapping);
+       list_for_each_entry_rcu(port, qom_list, qom_list) {
+               if (!team_dev_queue_xmit(team, port, skb))
+                       return true;
+       }
+       return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+                                          struct team_port *port)
+{
+       list_del_rcu(&port->qom_list);
+       synchronize_rcu();
+       INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+                                                     struct team_port *cur)
+{
+       if (port->priority < cur->priority)
+               return true;
+       if (port->priority > cur->priority)
+               return false;
+       if (port->index < cur->index)
+               return true;
+       return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+                                          struct team_port *port)
+{
+       struct team_port *cur;
+       struct list_head *qom_list;
+       struct list_head *node;
+
+       if (!port->queue_id || !team_port_enabled(port))
+               return;
+
+       qom_list = __team_get_qom_list(team, port->queue_id);
+       node = qom_list;
+       list_for_each_entry(cur, qom_list, qom_list) {
+               if (team_queue_override_port_has_gt_prio_than(port, cur))
+                       break;
+               node = &cur->qom_list;
+       }
+       list_add_tail_rcu(&port->qom_list, node);
+}
+
+static void __team_queue_override_enabled_check(struct team *team)
+{
+       struct team_port *port;
+       bool enabled = false;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               if (!list_empty(&port->qom_list)) {
+                       enabled = true;
+                       break;
+               }
+       }
+       if (enabled == team->queue_override_enabled)
+               return;
+       netdev_dbg(team->dev, "%s queue override\n",
+                  enabled ? "Enabling" : "Disabling");
+       team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+                                            struct team_port *port)
+{
+       __team_queue_override_port_del(team, port);
+       __team_queue_override_port_add(team, port);
+       __team_queue_override_enabled_check(team);
+}
+
+
 /****************
  * Port handling
  ****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
+       team_queue_override_port_refresh(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
 }
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
+       team_queue_override_port_refresh(team, port);
        __team_adjust_ops(team, team->en_port_count - 1);
        /*
         * Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
 #endif
 
 static void __team_port_change_port_added(struct team_port *port, bool linkup);
+static int team_dev_type_check_change(struct net_device *dev,
+                                     struct net_device *port_dev);
 
 static int team_port_add(struct team *team, struct net_device *port_dev)
 {
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
        char *portname = port_dev->name;
        int err;
 
-       if (port_dev->flags & IFF_LOOPBACK ||
-           port_dev->type != ARPHRD_ETHER) {
-               netdev_err(dev, "Device %s is of an unsupported type\n",
+       if (port_dev->flags & IFF_LOOPBACK) {
+               netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                return -EBUSY;
        }
 
+       if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+           vlan_uses_dev(dev)) {
+               netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+                          portname);
+               return -EPERM;
+       }
+
+       err = team_dev_type_check_change(dev, port_dev);
+       if (err)
+               return err;
+
        if (port_dev->flags & IFF_UP) {
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 
        port->dev = port_dev;
        port->team = team;
+       INIT_LIST_HEAD(&port->qom_list);
 
        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_set_mtu;
        }
 
-       memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+       memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
 
        err = team_port_enter(team, port);
        if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
 
 err_dev_open:
        team_port_leave(team, port);
-       team_port_set_orig_mac(port);
+       team_port_set_orig_dev_addr(port);
 
 err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);
@@ -1010,7 +1141,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        vlan_vids_del_by_dev(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);
-       team_port_set_orig_mac(port);
+       team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        synchronize_rcu();
        kfree(port);
@@ -1095,6 +1226,49 @@ static int team_user_linkup_en_option_set(struct team *team,
        return 0;
 }
 
+static int team_priority_option_get(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       ctx->data.s32_val = port->priority;
+       return 0;
+}
+
+static int team_priority_option_set(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       port->priority = ctx->data.s32_val;
+       team_queue_override_port_refresh(team, port);
+       return 0;
+}
+
+static int team_queue_id_option_get(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       ctx->data.u32_val = port->queue_id;
+       return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       if (port->queue_id == ctx->data.u32_val)
+               return 0;
+       if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+               return -EINVAL;
+       port->queue_id = ctx->data.u32_val;
+       team_queue_override_port_refresh(team, port);
+       return 0;
+}
+
+
 static const struct team_option team_options[] = {
        {
                .name = "mode",
@@ -1123,6 +1297,20 @@ static const struct team_option team_options[] = {
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
+       {
+               .name = "priority",
+               .type = TEAM_OPTION_TYPE_S32,
+               .per_port = true,
+               .getter = team_priority_option_get,
+               .setter = team_priority_option_set,
+       },
+       {
+               .name = "queue_id",
+               .type = TEAM_OPTION_TYPE_U32,
+               .per_port = true,
+               .getter = team_queue_id_option_get,
+               .setter = team_queue_id_option_set,
+       },
 };
 
 static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1158,6 +1346,9 @@ static int team_init(struct net_device *dev)
        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
+       err = team_queue_override_init(team);
+       if (err)
+               goto err_team_queue_override_init;
 
        team_adjust_ops(team);
 
@@ -1173,6 +1364,8 @@ static int team_init(struct net_device *dev)
        return 0;
 
 err_options_register:
+       team_queue_override_fini(team);
+err_team_queue_override_init:
        free_percpu(team->pcpu_stats);
 
        return err;
@@ -1190,6 +1383,7 @@ static void team_uninit(struct net_device *dev)
 
        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+       team_queue_override_fini(team);
        mutex_unlock(&team->lock);
 }
 
@@ -1219,10 +1413,12 @@ static int team_close(struct net_device *dev)
 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct team *team = netdev_priv(dev);
-       bool tx_success = false;
+       bool tx_success;
        unsigned int len = skb->len;
 
-       tx_success = team->ops.transmit(team, skb);
+       tx_success = team_queue_override_transmit(team, skb);
+       if (!tx_success)
+               tx_success = team->ops.transmit(team, skb);
        if (tx_success) {
                struct team_pcpu_stats *pcpu_stats;
 
@@ -1296,17 +1492,18 @@ static void team_set_rx_mode(struct net_device *dev)
 
 static int team_set_mac_address(struct net_device *dev, void *p)
 {
+       struct sockaddr *addr = p;
        struct team *team = netdev_priv(dev);
        struct team_port *port;
-       int err;
 
-       err = eth_mac_addr(dev, p);
-       if (err)
-               return err;
+       if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       dev->addr_assign_type &= ~NET_ADDR_RANDOM;
        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list)
-               if (team->ops.port_change_mac)
-                       team->ops.port_change_mac(team, port);
+               if (team->ops.port_change_dev_addr)
+                       team->ops.port_change_dev_addr(team, port);
        rcu_read_unlock();
        return 0;
 }
@@ -1537,6 +1734,45 @@ static const struct net_device_ops team_netdev_ops = {
  * rt netlink interface
  ***********************/
 
+static void team_setup_by_port(struct net_device *dev,
+                              struct net_device *port_dev)
+{
+       dev->header_ops = port_dev->header_ops;
+       dev->type = port_dev->type;
+       dev->hard_header_len = port_dev->hard_header_len;
+       dev->addr_len = port_dev->addr_len;
+       dev->mtu = port_dev->mtu;
+       memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+       memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+       dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+}
+
+static int team_dev_type_check_change(struct net_device *dev,
+                                     struct net_device *port_dev)
+{
+       struct team *team = netdev_priv(dev);
+       char *portname = port_dev->name;
+       int err;
+
+       if (dev->type == port_dev->type)
+               return 0;
+       if (!list_empty(&team->port_list)) {
+               netdev_err(dev, "Device %s is of different type\n", portname);
+               return -EBUSY;
+       }
+       err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
+       err = notifier_to_errno(err);
+       if (err) {
+               netdev_err(dev, "Refused to change device type\n");
+               return err;
+       }
+       dev_uc_flush(dev);
+       dev_mc_flush(dev);
+       team_setup_by_port(dev, port_dev);
+       call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+       return 0;
+}
+
 static void team_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -1651,7 +1887,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &team_nl_family, 0, TEAM_CMD_NOOP);
        if (!hdr) {
                err = -EMSGSIZE;
@@ -1660,7 +1896,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
        genlmsg_end(msg, hdr);
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
 err_msg_put:
        nlmsg_free(msg);
@@ -1717,7 +1953,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
        if (err < 0)
                goto err_fill;
 
-       err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+       err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
        return err;
 
 err_fill:
@@ -1726,11 +1962,11 @@ err_fill:
 }
 
 typedef int team_nl_send_func_t(struct sk_buff *skb,
-                               struct team *team, u32 pid);
+                               struct team *team, u32 portid);
 
-static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
 {
-       return genlmsg_unicast(dev_net(team->dev), skb, pid);
+       return genlmsg_unicast(dev_net(team->dev), skb, portid);
 }
 
 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
@@ -1790,6 +2026,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
                        goto nest_cancel;
                break;
+       case TEAM_OPTION_TYPE_S32:
+               if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
+                       goto nest_cancel;
+               if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
+                       goto nest_cancel;
+               break;
        default:
                BUG();
        }
@@ -1809,13 +2051,13 @@ nest_cancel:
 }
 
 static int __send_and_alloc_skb(struct sk_buff **pskb,
-                               struct team *team, u32 pid,
+                               struct team *team, u32 portid,
                                team_nl_send_func_t *send_func)
 {
        int err;
 
        if (*pskb) {
-               err = send_func(*pskb, team, pid);
+               err = send_func(*pskb, team, portid);
                if (err)
                        return err;
        }
@@ -1825,7 +2067,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb,
        return 0;
 }
 
-static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
                                    int flags, team_nl_send_func_t *send_func,
                                    struct list_head *sel_opt_inst_list)
 {
@@ -1842,11 +2084,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
                                    struct team_option_inst, tmp_list);
 
 start_again:
-       err = __send_and_alloc_skb(&skb, team, pid, send_func);
+       err = __send_and_alloc_skb(&skb, team, portid, send_func);
        if (err)
                return err;
 
-       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
+       hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_OPTIONS_GET);
        if (!hdr)
                return -EMSGSIZE;
@@ -1879,15 +2121,15 @@ start_again:
                goto start_again;
 
 send_done:
-       nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+       nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
        if (!nlh) {
-               err = __send_and_alloc_skb(&skb, team, pid, send_func);
+               err = __send_and_alloc_skb(&skb, team, portid, send_func);
                if (err)
                        goto errout;
                goto send_done;
        }
 
-       return send_func(skb, team, pid);
+       return send_func(skb, team, portid);
 
 nla_put_failure:
        err = -EMSGSIZE;
@@ -1910,7 +2152,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
 
        list_for_each_entry(opt_inst, &team->option_inst_list, list)
                list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
-       err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+       err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
                                       NLM_F_ACK, team_nl_send_unicast,
                                       &sel_opt_inst_list);
 
@@ -1978,6 +2220,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                case NLA_FLAG:
                        opt_type = TEAM_OPTION_TYPE_BOOL;
                        break;
+               case NLA_S32:
+                       opt_type = TEAM_OPTION_TYPE_S32;
+                       break;
                default:
                        goto team_put;
                }
@@ -2034,6 +2279,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                        case TEAM_OPTION_TYPE_BOOL:
                                ctx.data.bool_val = attr_data ? true : false;
                                break;
+                       case TEAM_OPTION_TYPE_S32:
+                               ctx.data.s32_val = nla_get_s32(attr_data);
+                               break;
                        default:
                                BUG();
                        }
@@ -2058,7 +2306,7 @@ team_put:
 }
 
 static int team_nl_fill_port_list_get(struct sk_buff *skb,
-                                     u32 pid, u32 seq, int flags,
+                                     u32 portid, u32 seq, int flags,
                                      struct team *team,
                                      bool fillall)
 {
@@ -2066,7 +2314,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
        void *hdr;
        struct team_port *port;
 
-       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+       hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
                          TEAM_CMD_PORT_LIST_GET);
        if (!hdr)
                return -EMSGSIZE;
@@ -2115,7 +2363,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
                                          struct genl_info *info, int flags,
                                          struct team *team)
 {
-       return team_nl_fill_port_list_get(skb, info->snd_pid,
+       return team_nl_fill_port_list_get(skb, info->snd_portid,
                                          info->snd_seq, NLM_F_ACK,
                                          team, true);
 }
@@ -2168,7 +2416,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
 };
 
 static int team_nl_send_multicast(struct sk_buff *skb,
-                                 struct team *team, u32 pid)
+                                 struct team *team, u32 portid)
 {
        return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
                                       team_change_event_mcgrp.id, GFP_KERNEL);
@@ -2246,7 +2494,7 @@ static void __team_options_change_check(struct team *team)
                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
        }
        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
-       if (err)
+       if (err && err != -ESRCH)
                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
                            err);
 }
@@ -2275,9 +2523,9 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
 
 send_event:
        err = team_nl_send_event_port_list_get(port->team);
-       if (err)
-               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
-                           port->dev->name);
+       if (err && err != -ESRCH)
+               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
+                           port->dev->name, err);
 
 }
 
index c96e4d2967f01e1588d5b44d07ba27ceae6853d6..9db0171e93669f483eeae19791ed4e090c333bb0 100644 (file)
@@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
 
 static int bc_port_enter(struct team *team, struct team_port *port)
 {
-       return team_port_set_team_mac(port);
+       return team_port_set_team_dev_addr(port);
 }
 
-static void bc_port_change_mac(struct team *team, struct team_port *port)
+static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-       team_port_set_team_mac(port);
+       team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops bc_mode_ops = {
        .transmit               = bc_transmit,
        .port_enter             = bc_port_enter,
-       .port_change_mac        = bc_port_change_mac,
+       .port_change_dev_addr   = bc_port_change_dev_addr,
 };
 
 static const struct team_mode bc_mode = {
index ad7ed0ec544c436fe8fa7df1b3a4360ae011d1f0..105135aa8f0586844927b641951087110a834cc1 100644 (file)
@@ -66,18 +66,18 @@ drop:
 
 static int rr_port_enter(struct team *team, struct team_port *port)
 {
-       return team_port_set_team_mac(port);
+       return team_port_set_team_dev_addr(port);
 }
 
-static void rr_port_change_mac(struct team *team, struct team_port *port)
+static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-       team_port_set_team_mac(port);
+       team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops rr_mode_ops = {
        .transmit               = rr_transmit,
        .port_enter             = rr_port_enter,
-       .port_change_mac        = rr_port_change_mac,
+       .port_change_dev_addr   = rr_port_change_dev_addr,
 };
 
 static const struct team_mode rr_mode = {
index 32e31c5c5dc6bfe95b4d38e377a508448d85c890..33ab824773c5a795dc49cc91fd28e67bb3e501a5 100644 (file)
@@ -221,7 +221,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
        if (ret < 0) {
-               dbg("read AX_CMD_READ_NODE_ID failed: %d", ret);
+               netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
+                          ret);
                goto out;
        }
        memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -303,7 +304,7 @@ static int ax88772_reset(struct usbnet *dev)
 
        ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
        if (ret < 0) {
-               dbg("Select PHY #1 failed: %d", ret);
+               netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                goto out;
        }
 
@@ -331,13 +332,13 @@ static int ax88772_reset(struct usbnet *dev)
 
        msleep(150);
        rx_ctl = asix_read_rx_ctl(dev);
-       dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
+       netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
        ret = asix_write_rx_ctl(dev, 0x0000);
        if (ret < 0)
                goto out;
 
        rx_ctl = asix_read_rx_ctl(dev);
-       dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl);
+       netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
 
        ret = asix_sw_reset(dev, AX_SWRESET_PRL);
        if (ret < 0)
@@ -364,7 +365,7 @@ static int ax88772_reset(struct usbnet *dev)
                                AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
                                AX88772_IPG2_DEFAULT, 0, NULL);
        if (ret < 0) {
-               dbg("Write IPG,IPG1,IPG2 failed: %d", ret);
+               netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
                goto out;
        }
 
@@ -381,10 +382,13 @@ static int ax88772_reset(struct usbnet *dev)
                goto out;
 
        rx_ctl = asix_read_rx_ctl(dev);
-       dbg("RX_CTL is 0x%04x after all initializations", rx_ctl);
+       netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+                  rx_ctl);
 
        rx_ctl = asix_read_medium_status(dev);
-       dbg("Medium Status is 0x%04x after all initializations", rx_ctl);
+       netdev_dbg(dev->net,
+                  "Medium Status is 0x%04x after all initializations\n",
+                  rx_ctl);
 
        return 0;
 
@@ -416,7 +420,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
        if (ret < 0) {
-               dbg("Failed to read MAC address: %d", ret);
+               netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
                return ret;
        }
        memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -439,7 +443,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Reset the PHY to normal operation mode */
        ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
        if (ret < 0) {
-               dbg("Select PHY #1 failed: %d", ret);
+               netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                return ret;
        }
 
@@ -459,7 +463,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Read PHYID register *AFTER* the PHY was reset properly */
        phyid = asix_get_phyid(dev);
-       dbg("PHYID=0x%08x", phyid);
+       netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
 
        /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
        if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -575,13 +579,13 @@ static int ax88178_reset(struct usbnet *dev)
        u32 phyid;
 
        asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
-       dbg("GPIO Status: 0x%04x", status);
+       netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
 
        asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
        asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
        asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
 
-       dbg("EEPROM index 0x17 is 0x%04x", eeprom);
+       netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
 
        if (eeprom == cpu_to_le16(0xffff)) {
                data->phymode = PHY_MODE_MARVELL;
@@ -592,7 +596,7 @@ static int ax88178_reset(struct usbnet *dev)
                data->ledmode = le16_to_cpu(eeprom) >> 8;
                gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
        }
-       dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
+       netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
 
        /* Power up external GigaPHY through AX88178 GPIO pin */
        asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
@@ -601,14 +605,14 @@ static int ax88178_reset(struct usbnet *dev)
                asix_write_gpio(dev, 0x001c, 300);
                asix_write_gpio(dev, 0x003c, 30);
        } else {
-               dbg("gpio phymode == 1 path");
+               netdev_dbg(dev->net, "gpio phymode == 1 path\n");
                asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
                asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
        }
 
        /* Read PHYID register *AFTER* powering up PHY */
        phyid = asix_get_phyid(dev);
-       dbg("PHYID=0x%08x", phyid);
+       netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
 
        /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
        asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
@@ -770,7 +774,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
        if (ret < 0) {
-               dbg("Failed to read MAC address: %d", ret);
+               netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
                return ret;
        }
        memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -929,6 +933,10 @@ static const struct usb_device_id  products [] = {
        // JVC MP-PRX1 Port Replicator
        USB_DEVICE (0x04f1, 0x3008),
        .driver_info = (unsigned long) &ax8817x_info,
+}, {
+       // Lenovo U2L100P 10/100
+       USB_DEVICE (0x17ef, 0x7203),
+       .driver_info = (unsigned long) &ax88772_info,
 }, {
        // ASIX AX88772B 10/100
        USB_DEVICE (0x0b95, 0x772b),
index 26c5bebd9ecab4c39c23044bbc45d0adc84a5897..18d9579123ea82a2b649e0f2bdcdaa6a9f2ca80c 100644 (file)
@@ -236,7 +236,8 @@ static void catc_rx_done(struct urb *urb)
        }
 
        if (status) {
-               dbg("rx_done, status %d, length %d", status, urb->actual_length);
+               dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
+                       status, urb->actual_length);
                return;
        }
 
@@ -275,10 +276,11 @@ static void catc_rx_done(struct urb *urb)
                if (atomic_read(&catc->recq_sz)) {
                        int state;
                        atomic_dec(&catc->recq_sz);
-                       dbg("getting extra packet");
+                       netdev_dbg(catc->netdev, "getting extra packet\n");
                        urb->dev = catc->usbdev;
                        if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
-                               dbg("submit(rx_urb) status %d", state);
+                               netdev_dbg(catc->netdev,
+                                          "submit(rx_urb) status %d\n", state);
                        }
                } else {
                        clear_bit(RX_RUNNING, &catc->flags);
@@ -317,18 +319,20 @@ static void catc_irq_done(struct urb *urb)
                return;
        /* -EPIPE:  should clear the halt */
        default:                /* error */
-               dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]);
+               dev_dbg(&urb->dev->dev,
+                       "irq_done, status %d, data %02x %02x.\n",
+                       status, data[0], data[1]);
                goto resubmit;
        }
 
        if (linksts == LinkGood) {
                netif_carrier_on(catc->netdev);
-               dbg("link ok");
+               netdev_dbg(catc->netdev, "link ok\n");
        }
 
        if (linksts == LinkBad) {
                netif_carrier_off(catc->netdev);
-               dbg("link bad");
+               netdev_dbg(catc->netdev, "link bad\n");
        }
 
        if (hasdata) {
@@ -385,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
        int r, status = urb->status;
 
        if (status == -ECONNRESET) {
-               dbg("Tx Reset.");
+               dev_dbg(&urb->dev->dev, "Tx Reset.\n");
                urb->status = 0;
                catc->netdev->trans_start = jiffies;
                catc->netdev->stats.tx_errors++;
@@ -395,7 +399,8 @@ static void catc_tx_done(struct urb *urb)
        }
 
        if (status) {
-               dbg("tx_done, status %d, length %d", status, urb->actual_length);
+               dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
+                       status, urb->actual_length);
                return;
        }
 
@@ -511,7 +516,8 @@ static void catc_ctrl_done(struct urb *urb)
        int status = urb->status;
 
        if (status)
-               dbg("ctrl_done, status %d, len %d.", status, urb->actual_length);
+               dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
+                       status, urb->actual_length);
 
        spin_lock_irqsave(&catc->ctrl_lock, flags);
 
@@ -667,7 +673,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
                f5u011_mchash_async(catc, catc->multicast);
                if (catc->rxmode[0] != rx) {
                        catc->rxmode[0] = rx;
-                       dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]);
+                       netdev_dbg(catc->netdev,
+                                  "Setting RX mode to %2.2X %2.2X\n",
+                                  catc->rxmode[0], catc->rxmode[1]);
                        f5u011_rxmode_async(catc, catc->rxmode);
                }
        }
@@ -766,6 +774,7 @@ static const struct net_device_ops catc_netdev_ops = {
 
 static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
+       struct device *dev = &intf->dev;
        struct usb_device *usbdev = interface_to_usbdev(intf);
        struct net_device *netdev;
        struct catc *catc;
@@ -774,7 +783,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 
        if (usb_set_interface(usbdev,
                        intf->altsetting->desc.bInterfaceNumber, 1)) {
-                dev_err(&intf->dev, "Can't set altsetting 1.\n");
+               dev_err(dev, "Can't set altsetting 1.\n");
                return -EIO;
        }
 
@@ -817,7 +826,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && 
            le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
            le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
-               dbg("Testing for f5u011");
+               dev_dbg(dev, "Testing for f5u011\n");
                catc->is_f5u011 = 1;            
                atomic_set(&catc->recq_sz, 0);
                pktsz = RX_PKT_SZ;
@@ -838,7 +847,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                 catc->irq_buf, 2, catc_irq_done, catc, 1);
 
        if (!catc->is_f5u011) {
-               dbg("Checking memory size\n");
+               dev_dbg(dev, "Checking memory size\n");
 
                i = 0x12345678;
                catc_write_mem(catc, 0x7a80, &i, 4);
@@ -850,7 +859,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                case 0x12345678:
                        catc_set_reg(catc, TxBufCount, 8);
                        catc_set_reg(catc, RxBufCount, 32);
-                       dbg("64k Memory\n");
+                       dev_dbg(dev, "64k Memory\n");
                        break;
                default:
                        dev_warn(&intf->dev,
@@ -858,49 +867,49 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                case 0x87654321:
                        catc_set_reg(catc, TxBufCount, 4);
                        catc_set_reg(catc, RxBufCount, 16);
-                       dbg("32k Memory\n");
+                       dev_dbg(dev, "32k Memory\n");
                        break;
                }
          
-               dbg("Getting MAC from SEEROM.");
+               dev_dbg(dev, "Getting MAC from SEEROM.\n");
          
                catc_get_mac(catc, netdev->dev_addr);
                
-               dbg("Setting MAC into registers.");
+               dev_dbg(dev, "Setting MAC into registers.\n");
          
                for (i = 0; i < 6; i++)
                        catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
                
-               dbg("Filling the multicast list.");
+               dev_dbg(dev, "Filling the multicast list.\n");
          
                memset(broadcast, 0xff, 6);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
                
-               dbg("Clearing error counters.");
+               dev_dbg(dev, "Clearing error counters.\n");
                
                for (i = 0; i < 8; i++)
                        catc_set_reg(catc, EthStats + i, 0);
                catc->last_stats = jiffies;
                
-               dbg("Enabling.");
+               dev_dbg(dev, "Enabling.\n");
                
                catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
                catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
                catc_set_reg(catc, LEDCtrl, LEDLink);
                catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
        } else {
-               dbg("Performing reset\n");
+               dev_dbg(dev, "Performing reset\n");
                catc_reset(catc);
                catc_get_mac(catc, netdev->dev_addr);
                
-               dbg("Setting RX Mode");
+               dev_dbg(dev, "Setting RX Mode\n");
                catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
                catc->rxmode[1] = 0;
                f5u011_rxmode(catc, catc->rxmode);
        }
-       dbg("Init done.");
+       dev_dbg(dev, "Init done.\n");
        printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
               netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
               usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
index 49ab45e17fe8999c6da7281773c4be8c4da2cee3..1e207f086b759416a916cb9180af1be85e8a3fce 100644 (file)
@@ -302,18 +302,9 @@ static const struct driver_info    cx82310_info = {
        .tx_fixup       = cx82310_tx_fixup,
 };
 
-#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
-       .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
-                      USB_DEVICE_ID_MATCH_DEV_INFO, \
-       .idVendor = (vend), \
-       .idProduct = (prod), \
-       .bDeviceClass = (cl), \
-       .bDeviceSubClass = (sc), \
-       .bDeviceProtocol = (pr)
-
 static const struct usb_device_id products[] = {
        {
-               USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+               USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
                .driver_info = (unsigned long) &cx82310_info
        },
        { },
index db3c8021f2a3aedb2dd69cd093cd65023f8b6cdc..a7e3f4e55bf3651b3874710ffcf4cfcdeb37e299 100644 (file)
@@ -91,7 +91,9 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        // get the packet count of the received skb
        count = le32_to_cpu(header->packet_count);
        if (count > GL_MAX_TRANSMIT_PACKETS) {
-               dbg("genelink: invalid received packet count %u", count);
+               netdev_dbg(dev->net,
+                          "genelink: invalid received packet count %u\n",
+                          count);
                return 0;
        }
 
@@ -107,7 +109,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
                // this may be a broken packet
                if (size > GL_MAX_PACKET_LEN) {
-                       dbg("genelink: invalid rx length %d", size);
+                       netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
+                                  size);
                        return 0;
                }
 
@@ -133,7 +136,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        skb_pull(skb, 4);
 
        if (skb->len > GL_MAX_PACKET_LEN) {
-               dbg("genelink: invalid rx length %d", skb->len);
+               netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
+                          skb->len);
                return 0;
        }
        return 1;
index c3d03490c97d04c4724b472bade7f1b6bc9bba0d..c75e11e1b385f3f5654605959b1a6db5ba44641a 100644 (file)
@@ -267,19 +267,16 @@ static int kaweth_control(struct kaweth_device *kaweth,
        struct usb_ctrlrequest *dr;
        int retval;
 
-       dbg("kaweth_control()");
+       netdev_dbg(kaweth->net, "kaweth_control()\n");
 
        if(in_interrupt()) {
-               dbg("in_interrupt()");
+               netdev_dbg(kaweth->net, "in_interrupt()\n");
                return -EBUSY;
        }
 
        dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
-
-       if (!dr) {
-               dbg("kmalloc() failed");
+       if (!dr)
                return -ENOMEM;
-       }
 
        dr->bRequestType = requesttype;
        dr->bRequest = request;
@@ -305,7 +302,7 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
 {
        int retval;
 
-       dbg("Reading kaweth configuration");
+       netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
 
        retval = kaweth_control(kaweth,
                                usb_rcvctrlpipe(kaweth->dev, 0),
@@ -327,7 +324,7 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
 {
        int retval;
 
-       dbg("Setting URB size to %d", (unsigned)urb_size);
+       netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
 
        retval = kaweth_control(kaweth,
                                usb_sndctrlpipe(kaweth->dev, 0),
@@ -349,7 +346,7 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
 {
        int retval;
 
-       dbg("Set SOFS wait to %d", (unsigned)sofs_wait);
+       netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
 
        retval = kaweth_control(kaweth,
                                usb_sndctrlpipe(kaweth->dev, 0),
@@ -372,7 +369,8 @@ static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
 {
        int retval;
 
-       dbg("Set receive filter to %d", (unsigned)receive_filter);
+       netdev_dbg(kaweth->net, "Set receive filter to %d\n",
+                  (unsigned)receive_filter);
 
        retval = kaweth_control(kaweth,
                                usb_sndctrlpipe(kaweth->dev, 0),
@@ -421,12 +419,13 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
        kaweth->firmware_buf[4] = type;
        kaweth->firmware_buf[5] = interrupt;
 
-       dbg("High: %i, Low:%i", kaweth->firmware_buf[3],
+       netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3],
                   kaweth->firmware_buf[2]);
 
-       dbg("Downloading firmware at %p to kaweth device at %p",
-           fw->data, kaweth);
-       dbg("Firmware length: %d", data_len);
+       netdev_dbg(kaweth->net,
+                  "Downloading firmware at %p to kaweth device at %p\n",
+                  fw->data, kaweth);
+       netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
 
        return kaweth_control(kaweth,
                              usb_sndctrlpipe(kaweth->dev, 0),
@@ -454,7 +453,7 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
        kaweth->firmware_buf[6] = 0x00;
        kaweth->firmware_buf[7] = 0x00;
 
-       dbg("Triggering firmware");
+       netdev_dbg(kaweth->net, "Triggering firmware\n");
 
        return kaweth_control(kaweth,
                              usb_sndctrlpipe(kaweth->dev, 0),
@@ -474,11 +473,11 @@ static int kaweth_reset(struct kaweth_device *kaweth)
 {
        int result;
 
-       dbg("kaweth_reset(%p)", kaweth);
+       netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
        result = usb_reset_configuration(kaweth->dev);
        mdelay(10);
 
-       dbg("kaweth_reset() returns %d.",result);
+       netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result);
 
        return result;
 }
@@ -595,6 +594,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
  ****************************************************************/
 static void kaweth_usb_receive(struct urb *urb)
 {
+       struct device *dev = &urb->dev->dev;
        struct kaweth_device *kaweth = urb->context;
        struct net_device *net = kaweth->net;
        int status = urb->status;
@@ -610,25 +610,25 @@ static void kaweth_usb_receive(struct urb *urb)
                kaweth->stats.rx_errors++;
                kaweth->end = 1;
                wake_up(&kaweth->term_wait);
-               dbg("Status was -EPIPE.");
+               dev_dbg(dev, "Status was -EPIPE.\n");
                return;
        }
        if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
                /* we are killed - set a flag and wake the disconnect handler */
                kaweth->end = 1;
                wake_up(&kaweth->term_wait);
-               dbg("Status was -ECONNRESET or -ESHUTDOWN.");
+               dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n");
                return;
        }
        if (unlikely(status == -EPROTO || status == -ETIME ||
                     status == -EILSEQ)) {
                kaweth->stats.rx_errors++;
-               dbg("Status was -EPROTO, -ETIME, or -EILSEQ.");
+               dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
                return;
        }
        if (unlikely(status == -EOVERFLOW)) {
                kaweth->stats.rx_errors++;
-               dbg("Status was -EOVERFLOW.");
+               dev_dbg(dev, "Status was -EOVERFLOW.\n");
        }
        spin_lock(&kaweth->device_lock);
        if (IS_BLOCKED(kaweth->status)) {
@@ -687,7 +687,7 @@ static int kaweth_open(struct net_device *net)
        struct kaweth_device *kaweth = netdev_priv(net);
        int res;
 
-       dbg("Opening network device.");
+       netdev_dbg(kaweth->net, "Opening network device.\n");
 
        res = usb_autopm_get_interface(kaweth->intf);
        if (res) {
@@ -787,7 +787,8 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
 
        if (unlikely(status != 0))
                if (status != -ENOENT)
-                       dbg("%s: TX status %d.", kaweth->net->name, status);
+                       dev_dbg(&urb->dev->dev, "%s: TX status %d.\n",
+                               kaweth->net->name, status);
 
        netif_wake_queue(kaweth->net);
        dev_kfree_skb_irq(skb);
@@ -871,7 +872,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
                                      KAWETH_PACKET_FILTER_BROADCAST |
                                     KAWETH_PACKET_FILTER_MULTICAST;
 
-       dbg("Setting Rx mode to %d", packet_filter_bitmap);
+       netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap);
 
        netif_stop_queue(net);
 
@@ -916,7 +917,8 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
                        result);
        }
        else {
-               dbg("Set Rx mode to %d", packet_filter_bitmap);
+               netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
+                          packet_filter_bitmap);
        }
 }
 
@@ -951,7 +953,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
        struct kaweth_device *kaweth = usb_get_intfdata(intf);
        unsigned long flags;
 
-       dbg("Suspending device");
+       dev_dbg(&intf->dev, "Suspending device\n");
        spin_lock_irqsave(&kaweth->device_lock, flags);
        kaweth->status |= KAWETH_STATUS_SUSPENDING;
        spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +970,7 @@ static int kaweth_resume(struct usb_interface *intf)
        struct kaweth_device *kaweth = usb_get_intfdata(intf);
        unsigned long flags;
 
-       dbg("Resuming device");
+       dev_dbg(&intf->dev, "Resuming device\n");
        spin_lock_irqsave(&kaweth->device_lock, flags);
        kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
        spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1003,36 +1005,37 @@ static int kaweth_probe(
                const struct usb_device_id *id      /* from id_table */
        )
 {
-       struct usb_device *dev = interface_to_usbdev(intf);
+       struct device *dev = &intf->dev;
+       struct usb_device *udev = interface_to_usbdev(intf);
        struct kaweth_device *kaweth;
        struct net_device *netdev;
        const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        int result = 0;
 
-       dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x",
-                dev->devnum,
-                le16_to_cpu(dev->descriptor.idVendor),
-                le16_to_cpu(dev->descriptor.idProduct),
-                le16_to_cpu(dev->descriptor.bcdDevice));
+       dev_dbg(dev,
+               "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
+               udev->devnum, le16_to_cpu(udev->descriptor.idVendor),
+               le16_to_cpu(udev->descriptor.idProduct),
+               le16_to_cpu(udev->descriptor.bcdDevice));
 
-       dbg("Device at %p", dev);
+       dev_dbg(dev, "Device at %p\n", udev);
 
-       dbg("Descriptor length: %x type: %x",
-                (int)dev->descriptor.bLength,
-                (int)dev->descriptor.bDescriptorType);
+       dev_dbg(dev, "Descriptor length: %x type: %x\n",
+               (int)udev->descriptor.bLength,
+               (int)udev->descriptor.bDescriptorType);
 
        netdev = alloc_etherdev(sizeof(*kaweth));
        if (!netdev)
                return -ENOMEM;
 
        kaweth = netdev_priv(netdev);
-       kaweth->dev = dev;
+       kaweth->dev = udev;
        kaweth->net = netdev;
 
        spin_lock_init(&kaweth->device_lock);
        init_waitqueue_head(&kaweth->term_wait);
 
-       dbg("Resetting.");
+       dev_dbg(dev, "Resetting.\n");
 
        kaweth_reset(kaweth);
 
@@ -1041,17 +1044,17 @@ static int kaweth_probe(
         * downloaded. Don't try to do it again, or we'll hang the device.
         */
 
-       if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) {
-               dev_info(&intf->dev, "Firmware present in device.\n");
+       if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) {
+               dev_info(dev, "Firmware present in device.\n");
        } else {
                /* Download the firmware */
-               dev_info(&intf->dev, "Downloading firmware...\n");
+               dev_info(dev, "Downloading firmware...\n");
                kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
                if ((result = kaweth_download_firmware(kaweth,
                                                      "kaweth/new_code.bin",
                                                      100,
                                                      2)) < 0) {
-                       dev_err(&intf->dev, "Error downloading firmware (%d)\n",
+                       dev_err(dev, "Error downloading firmware (%d)\n",
                                result);
                        goto err_fw;
                }
@@ -1060,8 +1063,7 @@ static int kaweth_probe(
                                                      "kaweth/new_code_fix.bin",
                                                      100,
                                                      3)) < 0) {
-                       dev_err(&intf->dev,
-                               "Error downloading firmware fix (%d)\n",
+                       dev_err(dev, "Error downloading firmware fix (%d)\n",
                                result);
                        goto err_fw;
                }
@@ -1070,8 +1072,7 @@ static int kaweth_probe(
                                                      "kaweth/trigger_code.bin",
                                                      126,
                                                      2)) < 0) {
-                       dev_err(&intf->dev,
-                               "Error downloading trigger code (%d)\n",
+                       dev_err(dev, "Error downloading trigger code (%d)\n",
                                result);
                        goto err_fw;
 
@@ -1081,19 +1082,18 @@ static int kaweth_probe(
                                                      "kaweth/trigger_code_fix.bin",
                                                      126,
                                                      3)) < 0) {
-                       dev_err(&intf->dev, "Error downloading trigger code fix (%d)\n", result);
+                       dev_err(dev, "Error downloading trigger code fix (%d)\n", result);
                        goto err_fw;
                }
 
 
                if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
-                       dev_err(&intf->dev, "Error triggering firmware (%d)\n",
-                               result);
+                       dev_err(dev, "Error triggering firmware (%d)\n", result);
                        goto err_fw;
                }
 
                /* Device will now disappear for a moment...  */
-               dev_info(&intf->dev, "Firmware loaded.  I'll be back...\n");
+               dev_info(dev, "Firmware loaded.  I'll be back...\n");
 err_fw:
                free_page((unsigned long)kaweth->firmware_buf);
                free_netdev(netdev);
@@ -1103,29 +1103,29 @@ err_fw:
        result = kaweth_read_configuration(kaweth);
 
        if(result < 0) {
-               dev_err(&intf->dev, "Error reading configuration (%d), no net device created\n", result);
+               dev_err(dev, "Error reading configuration (%d), no net device created\n", result);
                goto err_free_netdev;
        }
 
-       dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
-       dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
-       dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
-       dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
+       dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
+       dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
+       dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
+       dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
 
        if(!memcmp(&kaweth->configuration.hw_addr,
                    &bcast_addr,
                   sizeof(bcast_addr))) {
-               dev_err(&intf->dev, "Firmware not functioning properly, no net device created\n");
+               dev_err(dev, "Firmware not functioning properly, no net device created\n");
                goto err_free_netdev;
        }
 
        if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
-               dbg("Error setting URB size");
+               dev_dbg(dev, "Error setting URB size\n");
                goto err_free_netdev;
        }
 
        if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
-               dev_err(&intf->dev, "Error setting SOFS wait\n");
+               dev_err(dev, "Error setting SOFS wait\n");
                goto err_free_netdev;
        }
 
@@ -1135,11 +1135,11 @@ err_fw:
                                            KAWETH_PACKET_FILTER_MULTICAST);
 
        if(result < 0) {
-               dev_err(&intf->dev, "Error setting receive filter\n");
+               dev_err(dev, "Error setting receive filter\n");
                goto err_free_netdev;
        }
 
-       dbg("Initializing net device.");
+       dev_dbg(dev, "Initializing net device.\n");
 
        kaweth->intf = intf;
 
@@ -1181,20 +1181,20 @@ err_fw:
 
 #if 0
 // dma_supported() is deeply broken on almost all architectures
-       if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
+       if (dma_supported (dev, 0xffffffffffffffffULL))
                kaweth->net->features |= NETIF_F_HIGHDMA;
 #endif
 
-       SET_NETDEV_DEV(netdev, &intf->dev);
+       SET_NETDEV_DEV(netdev, dev);
        if (register_netdev(netdev) != 0) {
-               dev_err(&intf->dev, "Error registering netdev.\n");
+               dev_err(dev, "Error registering netdev.\n");
                goto err_intfdata;
        }
 
-       dev_info(&intf->dev, "kaweth interface created at %s\n",
+       dev_info(dev, "kaweth interface created at %s\n",
                 kaweth->net->name);
 
-       dbg("Kaweth probe returning.");
+       dev_dbg(dev, "Kaweth probe returning.\n");
 
        return 0;
 
@@ -1232,7 +1232,7 @@ static void kaweth_disconnect(struct usb_interface *intf)
        }
        netdev = kaweth->net;
 
-       dbg("Unregistering net device");
+       netdev_dbg(kaweth->net, "Unregistering net device\n");
        unregister_netdev(netdev);
 
        usb_free_urb(kaweth->rx_urb);
index 28c4d513ba850c9165a6e0376986fcff0bb9a0f5..c062a3e8295c12d5f69158026b3055522f0f5422 100644 (file)
@@ -155,12 +155,10 @@ static void nc_dump_registers(struct usbnet *dev)
        u8      reg;
        u16     *vp = kmalloc(sizeof (u16));
 
-       if (!vp) {
-               dbg("no memory?");
+       if (!vp)
                return;
-       }
 
-       dbg("%s registers:", dev->net->name);
+       netdev_dbg(dev->net, "registers:\n");
        for (reg = 0; reg < 0x20; reg++) {
                int retval;
 
@@ -172,11 +170,10 @@ static void nc_dump_registers(struct usbnet *dev)
 
                retval = nc_register_read(dev, reg, vp);
                if (retval < 0)
-                       dbg("%s reg [0x%x] ==> error %d",
-                               dev->net->name, reg, retval);
+                       netdev_dbg(dev->net, "reg [0x%x] ==> error %d\n",
+                                  reg, retval);
                else
-                       dbg("%s reg [0x%x] = 0x%x",
-                               dev->net->name, reg, *vp);
+                       netdev_dbg(dev->net, "reg [0x%x] = 0x%x\n", reg, *vp);
        }
        kfree(vp);
 }
@@ -300,15 +297,15 @@ static int net1080_reset(struct usbnet *dev)
        // nc_dump_registers(dev);
 
        if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
-               dbg("can't read %s-%s status: %d",
-                       dev->udev->bus->bus_name, dev->udev->devpath, retval);
+               netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
+                          dev->udev->bus->bus_name, dev->udev->devpath, retval);
                goto done;
        }
        status = *vp;
        nc_dump_status(dev, status);
 
        if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
-               dbg("can't read USBCTL, %d", retval);
+               netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
                goto done;
        }
        usbctl = *vp;
@@ -318,7 +315,7 @@ static int net1080_reset(struct usbnet *dev)
                        USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
 
        if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
-               dbg("can't read TTL, %d", retval);
+               netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
                goto done;
        }
        ttl = *vp;
@@ -326,7 +323,7 @@ static int net1080_reset(struct usbnet *dev)
 
        nc_register_write(dev, REG_TTL,
                        MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
-       dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
+       netdev_dbg(dev->net, "assigned TTL, %d ms\n", NC_READ_TTL_MS);
 
        netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
                   (status & STATUS_PORT_A) ? 'A' : 'B',
@@ -350,7 +347,7 @@ static int net1080_check_connect(struct usbnet *dev)
        status = *vp;
        kfree(vp);
        if (retval != 0) {
-               dbg("%s net1080_check_conn read - %d", dev->net->name, retval);
+               netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
                return retval;
        }
        if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
@@ -420,11 +417,9 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        u16                     hdr_len, packet_len;
 
        if (!(skb->len & 0x01)) {
-#ifdef DEBUG
-               struct net_device       *net = dev->net;
-               dbg("rx framesize %d range %d..%d mtu %d", skb->len,
-                       net->hard_header_len, dev->hard_mtu, net->mtu);
-#endif
+               netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
+                          skb->len, dev->net->hard_header_len, dev->hard_mtu,
+                          dev->net->mtu);
                dev->net->stats.rx_frame_errors++;
                nc_ensure_sync(dev);
                return 0;
@@ -435,17 +430,17 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        packet_len = le16_to_cpup(&header->packet_len);
        if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
                dev->net->stats.rx_frame_errors++;
-               dbg("packet too big, %d", packet_len);
+               netdev_dbg(dev->net, "packet too big, %d\n", packet_len);
                nc_ensure_sync(dev);
                return 0;
        } else if (hdr_len < MIN_HEADER) {
                dev->net->stats.rx_frame_errors++;
-               dbg("header too short, %d", hdr_len);
+               netdev_dbg(dev->net, "header too short, %d\n", hdr_len);
                nc_ensure_sync(dev);
                return 0;
        } else if (hdr_len > MIN_HEADER) {
                // out of band data for us?
-               dbg("header OOB, %d bytes", hdr_len - MIN_HEADER);
+               netdev_dbg(dev->net, "header OOB, %d bytes\n", hdr_len - MIN_HEADER);
                nc_ensure_sync(dev);
                // switch (vendor/product ids) { ... }
        }
@@ -458,23 +453,23 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        if ((packet_len & 0x01) == 0) {
                if (skb->data [packet_len] != PAD_BYTE) {
                        dev->net->stats.rx_frame_errors++;
-                       dbg("bad pad");
+                       netdev_dbg(dev->net, "bad pad\n");
                        return 0;
                }
                skb_trim(skb, skb->len - 1);
        }
        if (skb->len != packet_len) {
                dev->net->stats.rx_frame_errors++;
-               dbg("bad packet len %d (expected %d)",
-                       skb->len, packet_len);
+               netdev_dbg(dev->net, "bad packet len %d (expected %d)\n",
+                          skb->len, packet_len);
                nc_ensure_sync(dev);
                return 0;
        }
        if (header->packet_id != get_unaligned(&trailer->packet_id)) {
                dev->net->stats.rx_fifo_errors++;
-               dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
-                       le16_to_cpu(header->packet_id),
-                       le16_to_cpu(trailer->packet_id));
+               netdev_dbg(dev->net, "(2+ dropped) rx packet_id mismatch 0x%x 0x%x\n",
+                          le16_to_cpu(header->packet_id),
+                          le16_to_cpu(trailer->packet_id));
                return 0;
        }
 #if 0
index 3543c9e578247e6ee550ef7f18f680b680be3587..6883c371c59f5a2eb416466563cad5e1fbaa5b9a 100644 (file)
@@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
        atomic_set(&info->pmcount, 0);
 
        /* register subdriver */
-       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
+       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                dev_err(&info->control->dev, "subdriver registration failed\n");
                rv = PTR_ERR(subdriver);
@@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
 
        BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
 
-       /* require a single interrupt status endpoint for subdriver */
+       /* control and data is shared? */
+       if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
+               info->control = intf;
+               info->data = intf;
+               goto shared;
+       }
+
+       /* else require a single interrupt status endpoint on control intf */
        if (intf->cur_altsetting->desc.bNumEndpoints != 1)
                goto err;
 
+       /* and a number of CDC descriptors */
        while (len > 3) {
                struct usb_descriptor_header *h = (void *)buf;
 
@@ -231,8 +239,9 @@ next_desc:
        if (status < 0)
                goto err;
 
+shared:
        status = qmi_wwan_register_subdriver(dev);
-       if (status < 0) {
+       if (status < 0 && info->control != info->data) {
                usb_set_intfdata(info->data, NULL);
                usb_driver_release_interface(driver, info->data);
        }
@@ -241,20 +250,6 @@ err:
        return status;
 }
 
-/* Some devices combine the "control" and "data" functions into a
- * single interface with all three endpoints: interrupt + bulk in and
- * out
- */
-static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
-{
-       struct qmi_wwan_state *info = (void *)&dev->data;
-
-       /*  control and data is shared */
-       info->control = intf;
-       info->data = intf;
-       return qmi_wwan_register_subdriver(dev);
-}
-
 static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct qmi_wwan_state *info = (void *)&dev->data;
@@ -331,20 +326,12 @@ static const struct driver_info   qmi_wwan_info = {
        .manage_power   = qmi_wwan_manage_power,
 };
 
-static const struct driver_info        qmi_wwan_shared = {
-       .description    = "WWAN/QMI device",
-       .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_shared,
-       .unbind         = qmi_wwan_unbind,
-       .manage_power   = qmi_wwan_manage_power,
-};
-
 #define HUAWEI_VENDOR_ID       0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
 #define QMI_FIXED_INTF(vend, prod, num) \
        USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
-       .driver_info = (unsigned long)&qmi_wwan_shared
+       .driver_info = (unsigned long)&qmi_wwan_info
 
 /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
 #define QMI_GOBI1K_DEVICE(vend, prod) \
@@ -372,15 +359,15 @@ static const struct usb_device_id products[] = {
        },
        {       /* Huawei E392, E398 and possibly others in "Windows mode" */
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
-               .driver_info        = (unsigned long)&qmi_wwan_shared,
+               .driver_info        = (unsigned long)&qmi_wwan_info,
        },
        {       /* Pantech UML290, P4200 and more */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
-               .driver_info        = (unsigned long)&qmi_wwan_shared,
+               .driver_info        = (unsigned long)&qmi_wwan_info,
        },
        {       /* Pantech UML290 - newer firmware */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
-               .driver_info        = (unsigned long)&qmi_wwan_shared,
+               .driver_info        = (unsigned long)&qmi_wwan_info,
        },
 
        /* 3. Combined interface devices matching on interface number */
@@ -467,7 +454,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id
         */
        if (!id->driver_info) {
                dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
-               id->driver_info = (unsigned long)&qmi_wwan_shared;
+               id->driver_info = (unsigned long)&qmi_wwan_info;
        }
 
        return usbnet_probe(intf, id);
index 0e2c92e0e5323e03b57b442e43db78f4f0739e8a..5f39a3b225ef8729f47707dbfa2e8dcc98b52b54 100644 (file)
@@ -275,7 +275,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
                return -EBUSY;
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
+       netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
        /* Set the IDR registers. */
        set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
 #ifdef EEPROM_WRITE
@@ -503,12 +503,12 @@ static void intr_callback(struct urb *urb)
        if ((d[INT_MSR] & MSR_LINK) == 0) {
                if (netif_carrier_ok(dev->netdev)) {
                        netif_carrier_off(dev->netdev);
-                       dbg("%s: LINK LOST\n", __func__);
+                       netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__);
                }
        } else {
                if (!netif_carrier_ok(dev->netdev)) {
                        netif_carrier_on(dev->netdev);
-                       dbg("%s: LINK CAME BACK\n", __func__);
+                       netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__);
                }
        }
 
index 8e22417fa6c11b5d41845bda1ff4c84e9d369cda..c27d27701aee2e2ce0d7e22f077c579da2ee1b07 100644 (file)
@@ -68,9 +68,8 @@ static        atomic_t iface_counter = ATOMIC_INIT(0);
  */
 #define SIERRA_NET_USBCTL_BUF_LEN      1024
 
-struct sierra_net_info_data {
-       u16 rx_urb_size;
-};
+/* Overriding the default usbnet rx_urb_size */
+#define SIERRA_NET_RX_URB_SIZE         (8 * 1024)
 
 /* Private data structure */
 struct sierra_net_data {
@@ -560,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
 /*
  * Sync Retransmit Timer Handler. On expiry, kick the work queue
  */
-void sierra_sync_timer(unsigned long syncdata)
+static void sierra_sync_timer(unsigned long syncdata)
 {
        struct usbnet *dev = (struct usbnet *)syncdata;
 
@@ -678,9 +677,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
                0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
 
-       struct sierra_net_info_data *data =
-                       (struct sierra_net_info_data *)dev->driver_info->data;
-
        dev_dbg(&dev->udev->dev, "%s", __func__);
 
        ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -725,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        sierra_net_set_ctx_index(priv, 0);
 
        /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
-       dev->rx_urb_size  = data->rx_urb_size;
+       dev->rx_urb_size  = SIERRA_NET_RX_URB_SIZE;
        if (dev->udev->speed != USB_SPEED_HIGH)
-               dev->rx_urb_size  = min_t(size_t, 4096, data->rx_urb_size);
+               dev->rx_urb_size  = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE);
 
        dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
@@ -842,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                                netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
 
                        dev->net->stats.rx_frame_errors++;
-                       /* dev->net->stats.rx_errors incremented by caller */;
+                       /* dev->net->stats.rx_errors incremented by caller */
                        return 0;
                }
 
@@ -866,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }
 
 /* ---------------------------- Transmit data path ----------------------*/
-struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
-               gfp_t flags)
+static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
+                                          struct sk_buff *skb, gfp_t flags)
 {
        struct sierra_net_data *priv = sierra_net_get_private(dev);
        u16 len;
@@ -918,10 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        return NULL;
 }
 
-static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
-       .rx_urb_size = 8 * 1024,
-};
-
 static const struct driver_info sierra_net_info_direct_ip = {
        .description = "Sierra Wireless USB-to-WWAN Modem",
        .flags = FLAG_WWAN | FLAG_SEND_ZLP,
@@ -930,7 +922,6 @@ static const struct driver_info sierra_net_info_direct_ip = {
        .status = sierra_net_status,
        .rx_fixup = sierra_net_rx_fixup,
        .tx_fixup = sierra_net_tx_fixup,
-       .data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 #define DIRECT_IP_DEVICE(vend, prod) \
index 376143e8a1aaf6f78ee44fb76888922064c46dd1..b77ae76f4aa8f96bbbcabb2d2c4eac5a837d3c29 100644 (file)
@@ -52,6 +52,7 @@
 #define USB_PRODUCT_ID_LAN7500         (0x7500)
 #define USB_PRODUCT_ID_LAN7505         (0x7505)
 #define RXW_PADDING                    2
+#define SUPPORTED_WAKE                 (WAKE_MAGIC)
 
 #define check_warn(ret, fmt, args...) \
        ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -65,6 +66,7 @@
 struct smsc75xx_priv {
        struct usbnet *dev;
        u32 rfe_ctl;
+       u32 wolopts;
        u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
        struct mutex dataport_mutex;
        spinlock_t rfe_ctl_lock;
@@ -135,6 +137,30 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
        return ret;
 }
 
+static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
+static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
 static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
@@ -578,6 +604,26 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
        return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
 }
 
+static void smsc75xx_ethtool_get_wol(struct net_device *net,
+                                    struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+       wolinfo->supported = SUPPORTED_WAKE;
+       wolinfo->wolopts = pdata->wolopts;
+}
+
+static int smsc75xx_ethtool_set_wol(struct net_device *net,
+                                   struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+       pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+       return 0;
+}
+
 static const struct ethtool_ops smsc75xx_ethtool_ops = {
        .get_link       = usbnet_get_link,
        .nway_reset     = usbnet_nway_reset,
@@ -589,6 +635,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
        .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
        .get_eeprom     = smsc75xx_ethtool_get_eeprom,
        .set_eeprom     = smsc75xx_ethtool_set_eeprom,
+       .get_wol        = smsc75xx_ethtool_get_wol,
+       .set_wol        = smsc75xx_ethtool_set_wol,
 };
 
 static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -756,6 +804,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
        return 0;
 }
 
+static int smsc75xx_wait_ready(struct usbnet *dev)
+{
+       int timeout = 0;
+
+       do {
+               u32 buf;
+               int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+               check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+
+               if (buf & PMT_CTL_DEV_RDY)
+                       return 0;
+
+               msleep(10);
+               timeout++;
+       } while (timeout < 100);
+
+       netdev_warn(dev->net, "timeout waiting for device ready");
+       return -EIO;
+}
+
 static int smsc75xx_reset(struct usbnet *dev)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -764,6 +832,9 @@ static int smsc75xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
 
+       ret = smsc75xx_wait_ready(dev);
+       check_warn_return(ret, "device not ready in smsc75xx_reset");
+
        ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
        check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
 
@@ -1083,6 +1154,169 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        }
 }
 
+static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       ret = usbnet_suspend(intf, message);
+       check_warn_return(ret, "usbnet_suspend error");
+
+       /* if no wol options set, enter lowest power SUSPEND2 mode */
+       if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+               netdev_info(dev->net, "entering SUSPEND2 mode");
+
+               /* disable energy detect (link up) & wake up events */
+               ret = smsc75xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~(WUCSR_MPEN | WUCSR_WUEN);
+
+               ret = smsc75xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+
+               /* enter suspend2 mode */
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+               val |= PMT_CTL_SUS_MODE_2;
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+
+               return 0;
+       }
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               /* clear any pending magic packet status */
+               ret = smsc75xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val |= WUCSR_MPR;
+
+               ret = smsc75xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+       }
+
+       /* enable/disable magic packet wake */
+       ret = smsc75xx_read_reg(dev, WUCSR, &val);
+       check_warn_return(ret, "Error reading WUCSR");
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               netdev_info(dev->net, "enabling magic packet wakeup");
+               val |= WUCSR_MPEN;
+       } else {
+               netdev_info(dev->net, "disabling magic packet wakeup");
+               val &= ~WUCSR_MPEN;
+       }
+
+       ret = smsc75xx_write_reg(dev, WUCSR, val);
+       check_warn_return(ret, "Error writing WUCSR");
+
+       /* enable wol wakeup source */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL");
+
+       val |= PMT_CTL_WOL_EN;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL");
+
+       /* enable receiver */
+       ret = smsc75xx_read_reg(dev, MAC_RX, &val);
+       check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+
+       val |= MAC_RX_RXEN;
+
+       ret = smsc75xx_write_reg(dev, MAC_RX, val);
+       check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+
+       /* some wol options are enabled, so enter SUSPEND0 */
+       netdev_info(dev->net, "entering SUSPEND0 mode");
+
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL");
+
+       val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
+       val |= PMT_CTL_SUS_MODE_0;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL");
+
+       /* clear wol status */
+       val &= ~PMT_CTL_WUPS;
+       val |= PMT_CTL_WUPS_WOL;
+       ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL");
+
+       /* read back PMT_CTL */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL");
+
+       smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+       return 0;
+}
+
+static int smsc75xx_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               netdev_info(dev->net, "resuming from SUSPEND0");
+
+               smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+               /* Disable magic packet wake */
+               ret = smsc75xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~WUCSR_MPEN;
+
+               ret = smsc75xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               /* clear wake-up status */
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val &= ~PMT_CTL_WOL_EN;
+               val |= PMT_CTL_WUPS;
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+       } else {
+               netdev_info(dev->net, "resuming from SUSPEND2");
+
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val |= PMT_CTL_PHY_PWRUP;
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+       }
+
+       ret = smsc75xx_wait_ready(dev);
+       check_warn_return(ret, "device not ready in smsc75xx_resume");
+
+       return usbnet_resume(intf);
+}
+
 static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
                                     u32 rx_cmd_a, u32 rx_cmd_b)
 {
@@ -1251,9 +1485,9 @@ static struct usb_driver smsc75xx_driver = {
        .name           = SMSC_CHIPNAME,
        .id_table       = products,
        .probe          = usbnet_probe,
-       .suspend        = usbnet_suspend,
-       .resume         = usbnet_resume,
-       .reset_resume   = usbnet_resume,
+       .suspend        = smsc75xx_suspend,
+       .resume         = smsc75xx_resume,
+       .reset_resume   = smsc75xx_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
 };
index d45e539a84b79daa0b8b4f2f1f5c993326e2211d..7479a5761d0d6b35c9a275ae470e49dc959de366 100644 (file)
 #define SMSC95XX_INTERNAL_PHY_ID       (1)
 #define SMSC95XX_TX_OVERHEAD           (8)
 #define SMSC95XX_TX_OVERHEAD_CSUM      (12)
+#define SUPPORTED_WAKE                 (WAKE_MAGIC)
+
+#define check_warn(ret, fmt, args...) \
+       ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
+
+#define check_warn_return(ret, fmt, args...) \
+       ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
+
+#define check_warn_goto_done(ret, fmt, args...) \
+       ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
 
 struct smsc95xx_priv {
        u32 mac_cr;
        u32 hash_hi;
        u32 hash_lo;
+       u32 wolopts;
        spinlock_t mac_cr_lock;
 };
 
@@ -63,7 +74,8 @@ static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
-static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+                                         u32 *data)
 {
        u32 *buf = kmalloc(4, GFP_KERNEL);
        int ret;
@@ -88,7 +100,8 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
        return ret;
 }
 
-static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+                                          u32 data)
 {
        u32 *buf = kmalloc(4, GFP_KERNEL);
        int ret;
@@ -114,15 +127,41 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
        return ret;
 }
 
+static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
+static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
-static int smsc95xx_phy_wait_not_busy(struct usbnet *dev)
+static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
        u32 val;
+       int ret;
 
        do {
-               smsc95xx_read_reg(dev, MII_ADDR, &val);
+               ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
+               check_warn_return(ret, "Error reading MII_ADDR");
                if (!(val & MII_BUSY_))
                        return 0;
        } while (!time_after(jiffies, start_time + HZ));
@@ -134,33 +173,32 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 val, addr;
+       int ret;
 
        mutex_lock(&dev->phy_mutex);
 
        /* confirm MII not busy */
-       if (smsc95xx_phy_wait_not_busy(dev)) {
-               netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
-               mutex_unlock(&dev->phy_mutex);
-               return -EIO;
-       }
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
 
        /* set the address, index & direction (read from PHY) */
        phy_id &= dev->mii.phy_id_mask;
        idx &= dev->mii.reg_num_mask;
        addr = (phy_id << 11) | (idx << 6) | MII_READ_;
-       smsc95xx_write_reg(dev, MII_ADDR, addr);
+       ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
+       check_warn_goto_done(ret, "Error writing MII_ADDR");
 
-       if (smsc95xx_phy_wait_not_busy(dev)) {
-               netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
-               mutex_unlock(&dev->phy_mutex);
-               return -EIO;
-       }
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
 
-       smsc95xx_read_reg(dev, MII_DATA, &val);
+       ret = smsc95xx_read_reg(dev, MII_DATA, &val);
+       check_warn_goto_done(ret, "Error reading MII_DATA");
 
-       mutex_unlock(&dev->phy_mutex);
+       ret = (u16)(val & 0xFFFF);
 
-       return (u16)(val & 0xFFFF);
+done:
+       mutex_unlock(&dev->phy_mutex);
+       return ret;
 }
 
 static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
@@ -168,38 +206,41 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 val, addr;
+       int ret;
 
        mutex_lock(&dev->phy_mutex);
 
        /* confirm MII not busy */
-       if (smsc95xx_phy_wait_not_busy(dev)) {
-               netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
-               mutex_unlock(&dev->phy_mutex);
-               return;
-       }
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
 
        val = regval;
-       smsc95xx_write_reg(dev, MII_DATA, val);
+       ret = smsc95xx_write_reg(dev, MII_DATA, val);
+       check_warn_goto_done(ret, "Error writing MII_DATA");
 
        /* set the address, index & direction (write to PHY) */
        phy_id &= dev->mii.phy_id_mask;
        idx &= dev->mii.reg_num_mask;
        addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
-       smsc95xx_write_reg(dev, MII_ADDR, addr);
+       ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
+       check_warn_goto_done(ret, "Error writing MII_ADDR");
 
-       if (smsc95xx_phy_wait_not_busy(dev))
-               netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
 
+done:
        mutex_unlock(&dev->phy_mutex);
 }
 
-static int smsc95xx_wait_eeprom(struct usbnet *dev)
+static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
        u32 val;
+       int ret;
 
        do {
-               smsc95xx_read_reg(dev, E2P_CMD, &val);
+               ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
+               check_warn_return(ret, "Error reading E2P_CMD");
                if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
                        break;
                udelay(40);
@@ -213,13 +254,15 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
        return 0;
 }
 
-static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
+static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
        u32 val;
+       int ret;
 
        do {
-               smsc95xx_read_reg(dev, E2P_CMD, &val);
+               ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
+               check_warn_return(ret, "Error reading E2P_CMD");
 
                if (!(val & E2P_CMD_BUSY_))
                        return 0;
@@ -246,13 +289,15 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
 
        for (i = 0; i < length; i++) {
                val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
-               smsc95xx_write_reg(dev, E2P_CMD, val);
+               ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+               check_warn_return(ret, "Error writing E2P_CMD");
 
                ret = smsc95xx_wait_eeprom(dev);
                if (ret < 0)
                        return ret;
 
-               smsc95xx_read_reg(dev, E2P_DATA, &val);
+               ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
+               check_warn_return(ret, "Error reading E2P_DATA");
 
                data[i] = val & 0xFF;
                offset++;
@@ -276,7 +321,8 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
 
        /* Issue write/erase enable command */
        val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
-       smsc95xx_write_reg(dev, E2P_CMD, val);
+       ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+       check_warn_return(ret, "Error writing E2P_CMD");
 
        ret = smsc95xx_wait_eeprom(dev);
        if (ret < 0)
@@ -286,11 +332,13 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
 
                /* Fill data register */
                val = data[i];
-               smsc95xx_write_reg(dev, E2P_DATA, val);
+               ret = smsc95xx_write_reg(dev, E2P_DATA, val);
+               check_warn_return(ret, "Error writing E2P_DATA");
 
                /* Send "write" command */
                val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
-               smsc95xx_write_reg(dev, E2P_CMD, val);
+               ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+               check_warn_return(ret, "Error writing E2P_CMD");
 
                ret = smsc95xx_wait_eeprom(dev);
                if (ret < 0)
@@ -308,14 +356,14 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
        struct usbnet *dev = usb_context->dev;
        int status = urb->status;
 
-       if (status < 0)
-               netdev_warn(dev->net, "async callback failed with %d\n", status);
+       check_warn(status, "async callback failed with %d\n", status);
 
        kfree(usb_context);
        usb_free_urb(urb);
 }
 
-static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
+static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
+                                                u32 *data)
 {
        struct usb_context *usb_context;
        int status;
@@ -371,6 +419,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
        struct usbnet *dev = netdev_priv(netdev);
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        unsigned long flags;
+       int ret;
 
        pdata->hash_hi = 0;
        pdata->hash_lo = 0;
@@ -411,21 +460,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
        /* Initiate async writes, as we can't wait for completion here */
-       smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
-       smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
-       smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
+       ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
+       check_warn(ret, "failed to initiate async write to HASHH");
+
+       ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
+       check_warn(ret, "failed to initiate async write to HASHL");
+
+       ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
+       check_warn(ret, "failed to initiate async write to MAC_CR");
 }
 
-static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
-                                           u16 lcladv, u16 rmtadv)
+static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
+                                          u16 lcladv, u16 rmtadv)
 {
        u32 flow, afc_cfg = 0;
 
        int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
-       if (ret < 0) {
-               netdev_warn(dev->net, "error reading AFC_CFG\n");
-               return;
-       }
+       check_warn_return(ret, "Error reading AFC_CFG");
 
        if (duplex == DUPLEX_FULL) {
                u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -449,8 +500,13 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
                afc_cfg |= 0xF;
        }
 
-       smsc95xx_write_reg(dev, FLOW, flow);
-       smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
+       ret = smsc95xx_write_reg(dev, FLOW, flow);
+       check_warn_return(ret, "Error writing FLOW");
+
+       ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
+       check_warn_return(ret, "Error writing AFC_CFG");
+
+       return 0;
 }
 
 static int smsc95xx_link_reset(struct usbnet *dev)
@@ -460,12 +516,14 @@ static int smsc95xx_link_reset(struct usbnet *dev)
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        unsigned long flags;
        u16 lcladv, rmtadv;
-       u32 intdata;
+       int ret;
 
        /* clear interrupt status */
-       smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
-       intdata = 0xFFFFFFFF;
-       smsc95xx_write_reg(dev, INT_STS, intdata);
+       ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
+       check_warn_return(ret, "Error reading PHY_INT_SRC");
+
+       ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+       check_warn_return(ret, "Error writing INT_STS");
 
        mii_check_media(mii, 1, 1);
        mii_ethtool_gset(&dev->mii, &ecmd);
@@ -486,9 +544,11 @@ static int smsc95xx_link_reset(struct usbnet *dev)
        }
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-       smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       check_warn_return(ret, "Error writing MAC_CR");
 
-       smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+       ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+       check_warn_return(ret, "Error updating PHY flow control");
 
        return 0;
 }
@@ -524,10 +584,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
        int ret;
 
        ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read COE_CR: %d\n", ret);
 
        if (features & NETIF_F_HW_CSUM)
                read_buf |= Tx_COE_EN_;
@@ -540,10 +597,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
                read_buf &= ~Rx_COE_EN_;
 
        ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write COE_CR: %d\n", ret);
 
        netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
        return 0;
@@ -608,6 +662,26 @@ smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
        }
 }
 
+static void smsc95xx_ethtool_get_wol(struct net_device *net,
+                                    struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+       wolinfo->supported = SUPPORTED_WAKE;
+       wolinfo->wolopts = pdata->wolopts;
+}
+
+static int smsc95xx_ethtool_set_wol(struct net_device *net,
+                                   struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+       pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+       return 0;
+}
+
 static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .get_link       = usbnet_get_link,
        .nway_reset     = usbnet_nway_reset,
@@ -621,6 +695,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .set_eeprom     = smsc95xx_ethtool_set_eeprom,
        .get_regs_len   = smsc95xx_ethtool_getregslen,
        .get_regs       = smsc95xx_ethtool_getregs,
+       .get_wol        = smsc95xx_ethtool_get_wol,
+       .set_wol        = smsc95xx_ethtool_set_wol,
 };
 
 static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -658,55 +734,56 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
        int ret;
 
        ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write ADDRL: %d\n", ret);
 
        ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write ADDRH: %d\n", ret);
 
        return 0;
 }
 
 /* starts the TX path */
-static void smsc95xx_start_tx_path(struct usbnet *dev)
+static int smsc95xx_start_tx_path(struct usbnet *dev)
 {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        unsigned long flags;
-       u32 reg_val;
+       int ret;
 
        /* Enable Tx at MAC */
        spin_lock_irqsave(&pdata->mac_cr_lock, flags);
        pdata->mac_cr |= MAC_CR_TXEN_;
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-       smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
 
        /* Enable Tx at SCSRs */
-       reg_val = TX_CFG_ON_;
-       smsc95xx_write_reg(dev, TX_CFG, reg_val);
+       ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
+       check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret);
+
+       return 0;
 }
 
 /* Starts the Receive path */
-static void smsc95xx_start_rx_path(struct usbnet *dev)
+static int smsc95xx_start_rx_path(struct usbnet *dev)
 {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&pdata->mac_cr_lock, flags);
        pdata->mac_cr |= MAC_CR_RXEN_;
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-       smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
+
+       return 0;
 }
 
 static int smsc95xx_phy_initialize(struct usbnet *dev)
 {
-       int bmcr, timeout = 0;
+       int bmcr, ret, timeout = 0;
 
        /* Initialize MII structure */
        dev->mii.dev = dev->net;
@@ -735,7 +812,8 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
                ADVERTISE_PAUSE_ASYM);
 
        /* read to clear */
-       smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+       ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+       check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
 
        smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
                PHY_INT_MASK_DEFAULT_);
@@ -753,22 +831,14 @@ static int smsc95xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
 
-       write_buf = HW_CFG_LRST_;
-       ret = smsc95xx_write_reg(dev, HW_CFG, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
-                           ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
+       check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
 
        timeout = 0;
        do {
-               ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-               if (ret < 0) {
-                       netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-                       return ret;
-               }
                msleep(10);
+               ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
+               check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
                timeout++;
        } while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
 
@@ -777,21 +847,14 @@ static int smsc95xx_reset(struct usbnet *dev)
                return ret;
        }
 
-       write_buf = PM_CTL_PHY_RST_;
-       ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
+       check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret);
 
        timeout = 0;
        do {
-               ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
-               if (ret < 0) {
-                       netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
-                       return ret;
-               }
                msleep(10);
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
+               check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret);
                timeout++;
        } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
 
@@ -808,10 +871,7 @@ static int smsc95xx_reset(struct usbnet *dev)
                  "MAC Address: %pM\n", dev->net->dev_addr);
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG : 0x%08x\n", read_buf);
@@ -819,17 +879,10 @@ static int smsc95xx_reset(struct usbnet *dev)
        read_buf |= HW_CFG_BIR_;
 
        ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
-                           ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
                  read_buf);
@@ -849,41 +902,28 @@ static int smsc95xx_reset(struct usbnet *dev)
                  "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
 
        ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from BURST_CAP after writing: 0x%08x\n",
                  read_buf);
 
-       read_buf = DEFAULT_BULK_IN_DELAY;
-       ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "ret = %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+       check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
                  read_buf);
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG: 0x%08x\n", read_buf);
 
@@ -896,101 +936,66 @@ static int smsc95xx_reset(struct usbnet *dev)
        read_buf |= NET_IP_ALIGN << 9;
 
        ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
-                           ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
 
-       write_buf = 0xFFFFFFFF;
-       ret = smsc95xx_write_reg(dev, INT_STS, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
-                           ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+       check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
        netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
 
        /* Configure GPIO pins as LED outputs */
        write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
                LED_GPIO_CFG_FDX_LED;
        ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
-                           ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret);
 
        /* Init Tx */
-       write_buf = 0;
-       ret = smsc95xx_write_reg(dev, FLOW, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, FLOW, 0);
+       check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
 
-       read_buf = AFC_CFG_DEFAULT;
-       ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
+       check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret);
 
        /* Don't need mac_cr_lock during initialisation */
        ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
 
        /* Init Rx */
        /* Set Vlan */
-       write_buf = (u32)ETH_P_8021Q;
-       ret = smsc95xx_write_reg(dev, VLAN1, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
+       check_warn_return(ret, "Failed to write VLAN1: %d\n", ret);
 
        /* Enable or disable checksum offload engines */
-       smsc95xx_set_features(dev->net, dev->net->features);
+       ret = smsc95xx_set_features(dev->net, dev->net->features);
+       check_warn_return(ret, "Failed to set checksum offload features");
 
        smsc95xx_set_multicast(dev->net);
 
-       if (smsc95xx_phy_initialize(dev) < 0)
-               return -EIO;
+       ret = smsc95xx_phy_initialize(dev);
+       check_warn_return(ret, "Failed to init PHY");
 
        ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
 
        /* enable PHY interrupts */
        read_buf |= INT_EP_CTL_PHY_INT_;
 
        ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
 
-       smsc95xx_start_tx_path(dev);
-       smsc95xx_start_rx_path(dev);
+       ret = smsc95xx_start_tx_path(dev);
+       check_warn_return(ret, "Failed to start TX path");
+
+       ret = smsc95xx_start_rx_path(dev);
+       check_warn_return(ret, "Failed to start RX path");
 
        netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
        return 0;
@@ -1017,10 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
 
        ret = usbnet_get_endpoints(dev, intf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
 
        dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
                GFP_KERNEL);
@@ -1064,6 +1066,153 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        }
 }
 
+static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       ret = usbnet_suspend(intf, message);
+       check_warn_return(ret, "usbnet_suspend error");
+
+       /* if no wol options set, enter lowest power SUSPEND2 mode */
+       if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+               netdev_info(dev->net, "entering SUSPEND2 mode");
+
+               /* disable energy detect (link up) & wake up events */
+               ret = smsc95xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
+
+               ret = smsc95xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+               check_warn_return(ret, "Error reading PM_CTRL");
+
+               val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
+
+               ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+               check_warn_return(ret, "Error writing PM_CTRL");
+
+               /* enter suspend2 mode */
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+               check_warn_return(ret, "Error reading PM_CTRL");
+
+               val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+               val |= PM_CTL_SUS_MODE_2;
+
+               ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+               check_warn_return(ret, "Error writing PM_CTRL");
+
+               return 0;
+       }
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               /* clear any pending magic packet status */
+               ret = smsc95xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val |= WUCSR_MPR_;
+
+               ret = smsc95xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+       }
+
+       /* enable/disable magic packet wake */
+       ret = smsc95xx_read_reg(dev, WUCSR, &val);
+       check_warn_return(ret, "Error reading WUCSR");
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               netdev_info(dev->net, "enabling magic packet wakeup");
+               val |= WUCSR_MPEN_;
+       } else {
+               netdev_info(dev->net, "disabling magic packet wakeup");
+               val &= ~WUCSR_MPEN_;
+       }
+
+       ret = smsc95xx_write_reg(dev, WUCSR, val);
+       check_warn_return(ret, "Error writing WUCSR");
+
+       /* enable wol wakeup source */
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       check_warn_return(ret, "Error reading PM_CTRL");
+
+       val |= PM_CTL_WOL_EN_;
+
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       check_warn_return(ret, "Error writing PM_CTRL");
+
+       /* enable receiver; NOTE(review): return value ignored - confirm intentional */
+       smsc95xx_start_rx_path(dev);
+
+       /* some wol options are enabled, so enter SUSPEND0 */
+       netdev_info(dev->net, "entering SUSPEND0 mode");
+
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       check_warn_return(ret, "Error reading PM_CTRL");
+
+       val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
+       val |= PM_CTL_SUS_MODE_0;
+
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       check_warn_return(ret, "Error writing PM_CTRL");
+
+       /* clear wol status */
+       val &= ~PM_CTL_WUPS_;
+       val |= PM_CTL_WUPS_WOL_;
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       check_warn_return(ret, "Error writing PM_CTRL");
+
+       /* read back PM_CTRL */
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       check_warn_return(ret, "Error reading PM_CTRL");
+
+       smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+       return 0;
+}
+
+static int smsc95xx_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       BUG_ON(!dev);
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+               /* Disable magic packet wake */
+               ret = smsc95xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~WUCSR_MPEN_;
+
+               ret = smsc95xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               /* clear wake-up status */
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+               check_warn_return(ret, "Error reading PM_CTRL");
+
+               val &= ~PM_CTL_WOL_EN_;
+               val |= PM_CTL_WUPS_;
+
+               ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+               check_warn_return(ret, "Error writing PM_CTRL");
+       }
+
+       ret = usbnet_resume(intf);
+       check_warn_return(ret, "usbnet_resume error");
+
+       return 0;
+}
+
 static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 {
        skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -1326,8 +1475,9 @@ static struct usb_driver smsc95xx_driver = {
        .name           = "smsc95xx",
        .id_table       = products,
        .probe          = usbnet_probe,
-       .suspend        = usbnet_suspend,
-       .resume         = usbnet_resume,
+       .suspend        = smsc95xx_suspend,
+       .resume         = smsc95xx_resume,
+       .reset_resume   = smsc95xx_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
 };
index 86bc44977fbd98372844b622cf171153a4c60d32..2ff9815aa27c5e9e098dd587e8291398f2dce25f 100644 (file)
@@ -63,6 +63,7 @@
 #define INT_STS_TDFO_                  (0x00001000)
 #define INT_STS_RXDF_                  (0x00000800)
 #define INT_STS_GPIOS_                 (0x000007FF)
+#define INT_STS_CLEAR_ALL_             (0xFFFFFFFF)
 
 #define RX_CFG                         (0x0C)
 #define RX_FIFO_FLUSH_                 (0x00000001)
 #define HW_CFG_BCE_                    (0x00000002)
 #define HW_CFG_SRST_                   (0x00000001)
 
+#define RX_FIFO_INF                    (0x18)
+
 #define PM_CTRL                                (0x20)
+#define PM_CTL_RES_CLR_WKP_STS         (0x00000200)
 #define PM_CTL_DEV_RDY_                        (0x00000080)
 #define PM_CTL_SUS_MODE_               (0x00000060)
 #define PM_CTL_SUS_MODE_0              (0x00000000)
 #define PM_CTL_SUS_MODE_1              (0x00000020)
-#define PM_CTL_SUS_MODE_2              (0x00000060)
+#define PM_CTL_SUS_MODE_2              (0x00000040)
+#define PM_CTL_SUS_MODE_3              (0x00000060)
 #define PM_CTL_PHY_RST_                        (0x00000010)
 #define PM_CTL_WOL_EN_                 (0x00000008)
 #define PM_CTL_ED_EN_                  (0x00000004)
 #define WUFF                           (0x128)
 
 #define WUCSR                          (0x12C)
+#define WUCSR_GUE_                     (0x00000200)
+#define WUCSR_WUFR_                    (0x00000040)
+#define WUCSR_MPR_                     (0x00000020)
+#define WUCSR_WAKE_EN_                 (0x00000004)
+#define WUCSR_MPEN_                    (0x00000002)
 
 #define COE_CR                         (0x130)
 #define Tx_COE_EN_                     (0x00010000)
index 5852361032c459735e915db5443ac08d560f2c7c..e522ff70444cd0d7e8f1ce34132055e438ded7ce 100644 (file)
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        if (tbp[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(peer);
 
+       if (ifmp && (dev->ifindex != 0))
+               peer->ifindex = ifmp->ifi_index;
+
        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
index 9650c413e11f2e6408ad93737b5ba93915e952d5..cbf8b06253528e9e5557c31e844581d7334d7e21 100644 (file)
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
                goto done;
 
        if (v & VIRTIO_NET_S_ANNOUNCE) {
-               netif_notify_peers(vi->dev);
+               netdev_notify_peers(vi->dev);
                virtnet_ack_link_announce(vi);
        }
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
new file mode 100644 (file)
index 0000000..51de9ed
--- /dev/null
@@ -0,0 +1,1219 @@
+/*
+ * VXLAN: Virtual eXtensible Local Area Network
+ *
+ * Copyright (c) 2012 Vyatta Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO
+ *  - use IANA UDP port number (when defined)
+ *  - IPv6 (not in RFC)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include <linux/hash.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define VXLAN_VERSION  "0.1"
+
+#define VNI_HASH_BITS  10
+#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS  8
+#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
+#define FDB_AGE_DEFAULT 300 /* 5 min */
+#define FDB_AGE_INTERVAL (10 * HZ)     /* rescan interval */
+
+#define VXLAN_N_VID    (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+/* VLAN + IP header + UDP + VXLAN */
+#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+
+#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
+
+/* VXLAN protocol header */
+struct vxlanhdr {
+       __be32 vx_flags;
+       __be32 vx_vni;
+};
+
+/* UDP port for VXLAN traffic. */
+static unsigned int vxlan_port __read_mostly = 8472;
+module_param_named(udp_port, vxlan_port, uint, 0444);
+MODULE_PARM_DESC(udp_port, "Destination UDP port");
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-net private data for this module */
+static unsigned int vxlan_net_id;
+struct vxlan_net {
+       struct socket     *sock;        /* UDP encap socket */
+       struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Forwarding table entry */
+struct vxlan_fdb {
+       struct hlist_node hlist;        /* linked list of entries */
+       struct rcu_head   rcu;
+       unsigned long     updated;      /* jiffies */
+       unsigned long     used;
+       __be32            remote_ip;
+       u16               state;        /* see ndm_state */
+       u8                eth_addr[ETH_ALEN];
+};
+
+/* Per-cpu network traffic stats */
+struct vxlan_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+       struct hlist_node hlist;
+       struct net_device *dev;
+       struct vxlan_stats __percpu *stats;
+       __u32             vni;          /* virtual network id */
+       __be32            gaddr;        /* multicast group */
+       __be32            saddr;        /* source address */
+       unsigned int      link;         /* link to multicast over */
+       __u8              tos;          /* TOS override */
+       __u8              ttl;
+       bool              learn;
+
+       unsigned long     age_interval;
+       struct timer_list age_timer;
+       spinlock_t        hash_lock;
+       unsigned int      addrcnt;
+       unsigned int      addrmax;
+       unsigned int      addrexceeded;
+
+       struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
+/* salt for hash table */
+static u32 vxlan_salt __read_mostly;
+
+static inline struct hlist_head *vni_head(struct net *net, u32 id)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+       return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
+}
+
+/* Look up VNI in a per net namespace table */
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
+{
+       struct vxlan_dev *vxlan;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+               if (vxlan->vni == id)
+                       return vxlan;
+       }
+
+       return NULL;
+}
+
+/* Fill in neighbour message in skbuff. */
+static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
+                          const struct vxlan_fdb *fdb,
+                          u32 portid, u32 seq, int type, unsigned int flags)
+{
+       unsigned long now = jiffies;
+       struct nda_cacheinfo ci;
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+
+       ndm = nlmsg_data(nlh);
+       memset(ndm, 0, sizeof(*ndm));
+       ndm->ndm_family = AF_BRIDGE;
+       ndm->ndm_state = fdb->state;
+       ndm->ndm_ifindex = vxlan->dev->ifindex;
+       ndm->ndm_flags = NTF_SELF;
+       ndm->ndm_type = NDA_DST;
+
+       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+               goto nla_put_failure;
+
+       if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+               goto nla_put_failure;
+
+       ci.ndm_used      = jiffies_to_clock_t(now - fdb->used);
+       ci.ndm_confirmed = 0;
+       ci.ndm_updated   = jiffies_to_clock_t(now - fdb->updated);
+       ci.ndm_refcnt    = 0;
+
+       if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static inline size_t vxlan_nlmsg_size(void)
+{
+       return NLMSG_ALIGN(sizeof(struct ndmsg))
+               + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+               + nla_total_size(sizeof(__be32)) /* NDA_DST */
+               + nla_total_size(sizeof(struct nda_cacheinfo));
+}
+
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
+                            const struct vxlan_fdb *fdb, int type)
+{
+       struct net *net = dev_net(vxlan->dev);
+       struct sk_buff *skb;
+       int err = -ENOBUFS;
+
+       skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
+       if (skb == NULL)
+               goto errout;
+
+       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
+       if (err < 0) {
+               /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(skb);
+               goto errout;
+       }
+
+       rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+       return;
+errout:
+       if (err < 0)
+               rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Hash Ethernet address */
+static u32 eth_hash(const unsigned char *addr)
+{
+       u64 value = get_unaligned((u64 *)addr);
+
+       /* only want 6 bytes */
+#ifdef __BIG_ENDIAN
+       value <<= 16;
+#else
+       value >>= 16;
+#endif
+       return hash_64(value, FDB_HASH_BITS);
+}
+
+/* Hash chain to use given mac address */
+static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
+                                               const u8 *mac)
+{
+       return &vxlan->fdb_head[eth_hash(mac)];
+}
+
+/* Look up Ethernet address in forwarding table */
+static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+                                       const u8 *mac)
+
+{
+       struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
+       struct vxlan_fdb *f;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_rcu(f, node, head, hlist) {
+               if (compare_ether_addr(mac, f->eth_addr) == 0)
+                       return f;
+       }
+
+       return NULL;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+                           const u8 *mac, __be32 ip,
+                           __u16 state, __u16 flags)
+{
+       struct vxlan_fdb *f;
+       int notify = 0;
+
+       f = vxlan_find_mac(vxlan, mac);
+       if (f) {
+               if (flags & NLM_F_EXCL) {
+                       netdev_dbg(vxlan->dev,
+                                  "lost race to create %pM\n", mac);
+                       return -EEXIST;
+               }
+               if (f->state != state) {
+                       f->state = state;
+                       f->updated = jiffies;
+                       notify = 1;
+               }
+       } else {
+               if (!(flags & NLM_F_CREATE))
+                       return -ENOENT;
+
+               if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+                       return -ENOSPC;
+
+               netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
+               f = kmalloc(sizeof(*f), GFP_ATOMIC);
+               if (!f)
+                       return -ENOMEM;
+
+               notify = 1;
+               f->remote_ip = ip;
+               f->state = state;
+               f->updated = f->used = jiffies;
+               memcpy(f->eth_addr, mac, ETH_ALEN);
+
+               ++vxlan->addrcnt;
+               hlist_add_head_rcu(&f->hlist,
+                                  vxlan_fdb_head(vxlan, mac));
+       }
+
+       if (notify)
+               vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+
+       return 0;
+}
+
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+{
+       netdev_dbg(vxlan->dev,
+                   "delete %pM\n", f->eth_addr);
+
+       --vxlan->addrcnt;
+       vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+
+       hlist_del_rcu(&f->hlist);
+       kfree_rcu(f, rcu);
+}
+
+/* Add static entry (via netlink) */
+static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                        struct net_device *dev,
+                        const unsigned char *addr, u16 flags)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       __be32 ip;
+       int err;
+
+       if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
+               pr_info("RTM_NEWNEIGH with invalid state %#x\n",
+                       ndm->ndm_state);
+               return -EINVAL;
+       }
+
+       if (tb[NDA_DST] == NULL)
+               return -EINVAL;
+
+       if (nla_len(tb[NDA_DST]) != sizeof(__be32))
+               return -EAFNOSUPPORT;
+
+       ip = nla_get_be32(tb[NDA_DST]);
+
+       spin_lock_bh(&vxlan->hash_lock);
+       err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
+       spin_unlock_bh(&vxlan->hash_lock);
+
+       return err;
+}
+
+/* Delete entry (via netlink) */
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+                           const unsigned char *addr)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f;
+       int err = -ENOENT;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       f = vxlan_find_mac(vxlan, addr);
+       if (f) {
+               vxlan_fdb_destroy(vxlan, f);
+               err = 0;
+       }
+       spin_unlock_bh(&vxlan->hash_lock);
+
+       return err;
+}
+
+/* Dump forwarding table */
+static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                         struct net_device *dev, int idx)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       unsigned int h;
+
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               struct vxlan_fdb *f;
+               struct hlist_node *n;
+               int err;
+
+               hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+                       if (idx < cb->args[0])
+                               goto skip;
+
+                       err = vxlan_fdb_info(skb, vxlan, f,
+                                            NETLINK_CB(cb->skb).portid,
+                                            cb->nlh->nlmsg_seq,
+                                            RTM_NEWNEIGH,
+                                            NLM_F_MULTI);
+                       if (err < 0)
+                               break;
+skip:
+                       ++idx;
+               }
+       }
+
+       return idx;
+}
+
+/* Watch incoming packets to learn mapping between Ethernet address
+ * and Tunnel endpoint.
+ */
+static void vxlan_snoop(struct net_device *dev,
+                       __be32 src_ip, const u8 *src_mac)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f;
+       int err;
+
+       f = vxlan_find_mac(vxlan, src_mac);
+       if (likely(f)) {
+               f->used = jiffies;
+               if (likely(f->remote_ip == src_ip))
+                       return;
+
+               if (net_ratelimit())
+                       netdev_info(dev,
+                                   "%pM migrated from %pI4 to %pI4\n",
+                                   src_mac, &f->remote_ip, &src_ip);
+
+               f->remote_ip = src_ip;
+               f->updated = jiffies;
+       } else {
+               /* learned new entry */
+               spin_lock(&vxlan->hash_lock);
+               err = vxlan_fdb_create(vxlan, src_mac, src_ip,
+                                      NUD_REACHABLE,
+                                      NLM_F_EXCL|NLM_F_CREATE);
+               spin_unlock(&vxlan->hash_lock);
+       }
+}
+
+
+/* See if multicast group is already in use by other ID */
+static bool vxlan_group_used(struct vxlan_net *vn,
+                            const struct vxlan_dev *this)
+{
+       const struct vxlan_dev *vxlan;
+       struct hlist_node *node;
+       unsigned h;
+
+       for (h = 0; h < VNI_HASH_SIZE; ++h)
+               hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+                       if (vxlan == this)
+                               continue;
+
+                       if (!netif_running(vxlan->dev))
+                               continue;
+
+                       if (vxlan->gaddr == this->gaddr)
+                               return true;
+               }
+
+       return false;
+}
+
+/* kernel equivalent to IP_ADD_MEMBERSHIP */
+static int vxlan_join_group(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+       struct sock *sk = vn->sock->sk;
+       struct ip_mreqn mreq = {
+               .imr_multiaddr.s_addr = vxlan->gaddr,
+       };
+       int err;
+
+       /* Already a member of group */
+       if (vxlan_group_used(vn, vxlan))
+               return 0;
+
+       /* Need to drop RTNL to call multicast join */
+       rtnl_unlock();
+       lock_sock(sk);
+       err = ip_mc_join_group(sk, &mreq);
+       release_sock(sk);
+       rtnl_lock();
+
+       return err;
+}
+
+
+/* kernel equivalent to IP_DROP_MEMBERSHIP */
+static int vxlan_leave_group(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+       int err = 0;
+       struct sock *sk = vn->sock->sk;
+       struct ip_mreqn mreq = {
+               .imr_multiaddr.s_addr = vxlan->gaddr,
+       };
+
+       /* Only leave group when last vxlan is done. */
+       if (vxlan_group_used(vn, vxlan))
+               return 0;
+
+       /* Need to drop RTNL to call multicast leave */
+       rtnl_unlock();
+       lock_sock(sk);
+       err = ip_mc_leave_group(sk, &mreq);
+       release_sock(sk);
+       rtnl_lock();
+
+       return err;
+}
+
+/* Callback from net/ipv4/udp.c to receive packets */
+static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+       struct iphdr *oip;
+       struct vxlanhdr *vxh;
+       struct vxlan_dev *vxlan;
+       struct vxlan_stats *stats;
+       __u32 vni;
+       int err;
+
+       /* pop off outer UDP header */
+       __skb_pull(skb, sizeof(struct udphdr));
+
+       /* Need Vxlan and inner Ethernet header to be present */
+       if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+               goto error;
+
+       /* Drop packets with reserved bits set */
+       vxh = (struct vxlanhdr *) skb->data;
+       if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
+           (vxh->vx_vni & htonl(0xff))) {
+               netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+                          ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+               goto error;
+       }
+
+       __skb_pull(skb, sizeof(struct vxlanhdr));
+       skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));
+
+       /* Is this VNI defined? */
+       vni = ntohl(vxh->vx_vni) >> 8;
+       vxlan = vxlan_find_vni(sock_net(sk), vni);
+       if (!vxlan) {
+               netdev_dbg(skb->dev, "unknown vni %d\n", vni);
+               goto drop;
+       }
+
+       if (!pskb_may_pull(skb, ETH_HLEN)) {
+               vxlan->dev->stats.rx_length_errors++;
+               vxlan->dev->stats.rx_errors++;
+               goto drop;
+       }
+
+       /* Re-examine inner Ethernet packet */
+       oip = ip_hdr(skb);
+       skb->protocol = eth_type_trans(skb, vxlan->dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+       /* Ignore packet loops (and multicast echo) */
+       if (compare_ether_addr(eth_hdr(skb)->h_source,
+                              vxlan->dev->dev_addr) == 0)
+               goto drop;
+
+       if (vxlan->learn)
+               vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+
+       __skb_tunnel_rx(skb, vxlan->dev);
+       skb_reset_network_header(skb);
+
+       err = IP_ECN_decapsulate(oip, skb);
+       if (unlikely(err)) {
+               if (log_ecn_error)
+                       net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                            &oip->saddr, oip->tos);
+               if (err > 1) {
+                       ++vxlan->dev->stats.rx_frame_errors;
+                       ++vxlan->dev->stats.rx_errors;
+                       goto drop;
+               }
+       }
+
+       stats = this_cpu_ptr(vxlan->stats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+
+       return 0;
+error:
+       /* Put UDP header back */
+       __skb_push(skb, sizeof(struct udphdr));
+
+       return 1;
+drop:
+       /* Consume bad packet */
+       kfree_skb(skb);
+       return 0;
+}
+
+/* Extract dsfield from inner protocol */
+static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
+                                  const struct sk_buff *skb)
+{
+       if (skb->protocol == htons(ETH_P_IP))
+               return iph->tos;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               /* Caller passes the inner network header; for IPv6 frames
+                * it is really an ipv6hdr, hence the cast.
+                */
+               return ipv6_get_dsfield((const struct ipv6hdr *)iph);
+       else
+               /* Non-IP payload: no dsfield to inherit */
+               return 0;
+}
+
+/* Propagate ECN bits out: combine the configured outer tos with the
+ * inner header's ECN field per RFC 6040 encapsulation rules.
+ */
+static inline u8 vxlan_ecn_encap(u8 tos,
+                                const struct iphdr *iph,
+                                const struct sk_buff *skb)
+{
+       u8 inner = vxlan_get_dsfield(iph, skb);
+
+       return INET_ECN_encapsulate(tos, inner);
+}
+
+/* Transmit local packets over Vxlan
+ *
+ * Outer IP header inherits ECN and DF from inner header.
+ * Outer UDP destination is the VXLAN assigned port.
+ *           source port is based on hash of flow if available
+ *                       otherwise use a random value
+ */
+static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct rtable *rt;
+       const struct ethhdr *eth;
+       const struct iphdr *old_iph;
+       struct iphdr *iph;
+       struct vxlanhdr *vxh;
+       struct udphdr *uh;
+       struct flowi4 fl4;
+       struct vxlan_fdb *f;
+       /* inner frame length, captured before encapsulation headers are
+        * pushed so tx_bytes accounts the payload as seen by the caller
+        */
+       unsigned int pkt_len = skb->len;
+       u32 hash;
+       __be32 dst;
+       __be16 df = 0;
+       __u8 tos, ttl;
+       int err;
+
+       /* Need space for new headers (invalidates iph ptr) */
+       if (skb_cow_head(skb, VXLAN_HEADROOM))
+               goto drop;
+
+       eth = (void *)skb->data;
+       old_iph = ip_hdr(skb);
+
+       /* Unicast with a known FDB entry goes to the learned endpoint,
+        * everything else falls back to the multicast group (if any).
+        */
+       if (!is_multicast_ether_addr(eth->h_dest) &&
+           (f = vxlan_find_mac(vxlan, eth->h_dest)))
+               dst = f->remote_ip;
+       else if (vxlan->gaddr) {
+               dst = vxlan->gaddr;
+       } else
+               goto drop;
+
+       ttl = vxlan->ttl;
+       if (!ttl && IN_MULTICAST(ntohl(dst)))
+               ttl = 1;
+
+       /* tos value of 1 is the reserved "inherit from inner header"
+        * setting (1 is not a valid dsfield: ECN bit without codepoint)
+        */
+       tos = vxlan->tos;
+       if (tos == 1)
+               tos = vxlan_get_dsfield(old_iph, skb);
+
+       hash = skb_get_rxhash(skb);
+
+       rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
+                                vxlan->saddr, vxlan->vni,
+                                RT_TOS(tos), vxlan->link);
+       if (IS_ERR(rt)) {
+               netdev_dbg(dev, "no route to %pI4\n", &dst);
+               dev->stats.tx_carrier_errors++;
+               goto tx_error;
+       }
+
+       /* Refuse a route that loops back through this device */
+       if (rt->dst.dev == dev) {
+               netdev_dbg(dev, "circular route to %pI4\n", &dst);
+               ip_rt_put(rt);
+               dev->stats.collisions++;
+               goto tx_error;
+       }
+
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+       IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+                             IPSKB_REROUTED);
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
+
+       /* Build outer headers innermost-first: VXLAN, then UDP, then IP */
+       vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+       vxh->vx_flags = htonl(VXLAN_FLAGS);
+       vxh->vx_vni = htonl(vxlan->vni << 8);
+
+       __skb_push(skb, sizeof(*uh));
+       skb_reset_transport_header(skb);
+       uh = udp_hdr(skb);
+
+       uh->dest = htons(vxlan_port);
+       /* NOTE(review): hash/random32() is stored into the __be16 source
+        * port without htons() or range clamping — the port is not
+        * endian-stable across hosts; confirm this is intended.
+        */
+       uh->source = hash ? :random32();
+
+       uh->len = htons(skb->len);
+       uh->check = 0;          /* UDP checksum disabled for encapsulation */
+
+       __skb_push(skb, sizeof(*iph));
+       skb_reset_network_header(skb);
+       iph             = ip_hdr(skb);
+       iph->version    = 4;
+       iph->ihl        = sizeof(struct iphdr) >> 2;
+       iph->frag_off   = df;
+       iph->protocol   = IPPROTO_UDP;
+       iph->tos        = vxlan_ecn_encap(tos, old_iph, skb);
+       iph->daddr      = fl4.daddr;
+       iph->saddr      = fl4.saddr;
+       iph->ttl        = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+       /* See __IPTUNNEL_XMIT */
+       skb->ip_summed = CHECKSUM_NONE;
+       ip_select_ident(iph, &rt->dst, NULL);
+
+       err = ip_local_out(skb);
+       if (likely(net_xmit_eval(err) == 0)) {
+               struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes += pkt_len;
+               u64_stats_update_end(&stats->syncp);
+       } else {
+               dev->stats.tx_errors++;
+               dev->stats.tx_aborted_errors++;
+       }
+       return NETDEV_TX_OK;
+
+drop:
+       dev->stats.tx_dropped++;
+       goto tx_free;
+
+tx_error:
+       dev->stats.tx_errors++;
+tx_free:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Walk the forwarding table and purge stale entries.
+ *
+ * Timer callback (deferrable, see vxlan_setup); runs in softirq context,
+ * hence the _bh spinlock.  Re-arms itself for the earliest entry that is
+ * not yet expired, or FDB_AGE_INTERVAL from now if none is pending.
+ */
+static void vxlan_cleanup(unsigned long arg)
+{
+       struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
+       unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
+       unsigned int h;
+
+       if (!netif_running(vxlan->dev))
+               return;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               struct hlist_node *p, *n;
+               hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
+                       struct vxlan_fdb *f
+                               = container_of(p, struct vxlan_fdb, hlist);
+                       unsigned long timeout;
+
+                       /* Static entries are never aged out */
+                       if (f->state == NUD_PERMANENT)
+                               continue;
+
+                       timeout = f->used + vxlan->age_interval * HZ;
+                       if (time_before_eq(timeout, jiffies)) {
+                               netdev_dbg(vxlan->dev,
+                                          "garbage collect %pM\n",
+                                          f->eth_addr);
+                               f->state = NUD_STALE;
+                               vxlan_fdb_destroy(vxlan, f);
+                       } else if (time_before(timeout, next_timer))
+                               next_timer = timeout;
+               }
+       }
+       spin_unlock_bh(&vxlan->hash_lock);
+
+       mod_timer(&vxlan->age_timer, next_timer);
+}
+
+/* Setup stats when device is created (ndo_init).
+ * The per-cpu counters allocated here are freed in vxlan_free().
+ */
+static int vxlan_init(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       vxlan->stats = alloc_percpu(struct vxlan_stats);
+       if (!vxlan->stats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Start ageing timer and join group when device is brought up (ndo_open).
+ * Returns the vxlan_join_group() error if the IGMP join fails; the
+ * ageing timer is only armed when ageing is enabled (interval != 0).
+ */
+static int vxlan_open(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       int err;
+
+       if (vxlan->gaddr) {
+               err = vxlan_join_group(dev);
+               if (err)
+                       return err;
+       }
+
+       if (vxlan->age_interval)
+               mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
+
+       return 0;
+}
+
+/* Purge the forwarding table: destroy every FDB entry, including
+ * permanent ones.  Called on device stop with the device quiesced.
+ */
+static void vxlan_flush(struct vxlan_dev *vxlan)
+{
+       unsigned h;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               struct hlist_node *p, *n;
+               hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
+                       struct vxlan_fdb *f
+                               = container_of(p, struct vxlan_fdb, hlist);
+                       vxlan_fdb_destroy(vxlan, f);
+               }
+       }
+       spin_unlock_bh(&vxlan->hash_lock);
+}
+
+/* Cleanup timer and forwarding table on shutdown (ndo_stop).
+ * Mirrors vxlan_open: leave the multicast group, then synchronously
+ * stop the ageing timer before tearing down the FDB it walks.
+ */
+static int vxlan_stop(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       if (vxlan->gaddr)
+               vxlan_leave_group(dev);
+
+       del_timer_sync(&vxlan->age_timer);
+
+       vxlan_flush(vxlan);
+
+       return 0;
+}
+
+/* Merge per-cpu statistics (ndo_get_stats64).
+ * Per-cpu rx/tx byte and packet counters are summed under the
+ * u64_stats seqcount; the remaining error counters come from the
+ * plain dev->stats fields updated on the slow paths.
+ */
+static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
+                                              struct rtnl_link_stats64 *stats)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_stats tmp, sum = { 0 };
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               unsigned int start;
+               /* NOTE(review): this local deliberately shadows the
+                * 'stats' parameter; the parameter is only used after
+                * the loop, so this is safe but easy to misread.
+                */
+               const struct vxlan_stats *stats
+                       = per_cpu_ptr(vxlan->stats, cpu);
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&stats->syncp);
+                       memcpy(&tmp, stats, sizeof(tmp));
+               } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+
+               sum.tx_bytes   += tmp.tx_bytes;
+               sum.tx_packets += tmp.tx_packets;
+               sum.rx_bytes   += tmp.rx_bytes;
+               sum.rx_packets += tmp.rx_packets;
+       }
+
+       stats->tx_bytes   = sum.tx_bytes;
+       stats->tx_packets = sum.tx_packets;
+       stats->rx_bytes   = sum.rx_bytes;
+       stats->rx_packets = sum.rx_packets;
+
+       stats->multicast = dev->stats.multicast;
+       stats->rx_length_errors = dev->stats.rx_length_errors;
+       stats->rx_frame_errors = dev->stats.rx_frame_errors;
+       stats->rx_errors = dev->stats.rx_errors;
+
+       stats->tx_dropped = dev->stats.tx_dropped;
+       stats->tx_carrier_errors  = dev->stats.tx_carrier_errors;
+       stats->tx_aborted_errors  = dev->stats.tx_aborted_errors;
+       stats->collisions  = dev->stats.collisions;
+       stats->tx_errors = dev->stats.tx_errors;
+
+       return stats;
+}
+
+/* Stub, nothing needs to be done.
+ * Present only so the core does not complain about a missing
+ * ndo_set_rx_mode handler.
+ */
+static void vxlan_set_multicast_list(struct net_device *dev)
+{
+}
+
+/* Netdevice operations; FDB hooks back the 'bridge fdb' interface */
+static const struct net_device_ops vxlan_netdev_ops = {
+       .ndo_init               = vxlan_init,
+       .ndo_open               = vxlan_open,
+       .ndo_stop               = vxlan_stop,
+       .ndo_start_xmit         = vxlan_xmit,
+       .ndo_get_stats64        = vxlan_stats64,
+       .ndo_set_rx_mode        = vxlan_set_multicast_list,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_fdb_add            = vxlan_fdb_add,
+       .ndo_fdb_del            = vxlan_fdb_delete,
+       .ndo_fdb_dump           = vxlan_fdb_dump,
+};
+
+/* Info for udev, that this is a virtual tunnel endpoint
+ * (attached via SET_NETDEV_DEVTYPE in vxlan_setup)
+ */
+static struct device_type vxlan_type = {
+       .name = "vxlan",
+};
+
+/* Device destructor: release the per-cpu stats allocated in
+ * vxlan_init() before freeing the netdev itself.
+ */
+static void vxlan_free(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       free_percpu(vxlan->stats);
+       free_netdev(dev);
+}
+
+/* Initialize the device structure (rtnl_link_ops .setup).
+ * Runs before vxlan_newlink fills in netlink-supplied parameters.
+ */
+static void vxlan_setup(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       unsigned h;
+
+       eth_hw_addr_random(dev);
+       ether_setup(dev);
+
+       dev->netdev_ops = &vxlan_netdev_ops;
+       dev->destructor = vxlan_free;
+       SET_NETDEV_DEVTYPE(dev, &vxlan_type);
+
+       /* Virtual device: no tx queue, lockless xmit, keep dst for reuse */
+       dev->tx_queue_len = 0;
+       dev->features   |= NETIF_F_LLTX;
+       dev->features   |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+
+       spin_lock_init(&vxlan->hash_lock);
+
+       /* Deferrable: ageing does not need to wake an idle CPU */
+       init_timer_deferrable(&vxlan->age_timer);
+       vxlan->age_timer.function = vxlan_cleanup;
+       vxlan->age_timer.data = (unsigned long) vxlan;
+
+       vxlan->dev = dev;
+
+       for (h = 0; h < FDB_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+}
+
+/* Netlink attribute policy for IFLA_VXLAN_* parameters.
+ * NOTE(review): IFLA_VXLAN_TTL is accepted here and reported by
+ * vxlan_fill_info, but vxlan_newlink never copies it into the
+ * device — verify the attribute is actually consumed.
+ */
+static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
+       [IFLA_VXLAN_ID]         = { .type = NLA_U32 },
+       [IFLA_VXLAN_GROUP]      = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_VXLAN_LINK]       = { .type = NLA_U32 },
+       [IFLA_VXLAN_LOCAL]      = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+       [IFLA_VXLAN_TOS]        = { .type = NLA_U8 },
+       [IFLA_VXLAN_TTL]        = { .type = NLA_U8 },
+       [IFLA_VXLAN_LEARNING]   = { .type = NLA_U8 },
+       [IFLA_VXLAN_AGEING]     = { .type = NLA_U32 },
+       [IFLA_VXLAN_LIMIT]      = { .type = NLA_U32 },
+};
+
+/* Validate netlink attributes before vxlan_newlink runs:
+ * MAC address shape, VNI range (24 bits) and that the group
+ * address, if given, is IPv4 multicast.
+ */
+static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+                       pr_debug("invalid link address (not ethernet)\n");
+                       return -EINVAL;
+               }
+
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+                       pr_debug("invalid all zero ethernet address\n");
+                       return -EADDRNOTAVAIL;
+               }
+       }
+
+       /* A VNI is mandatory, so an attribute-less request is invalid */
+       if (!data)
+               return -EINVAL;
+
+       if (data[IFLA_VXLAN_ID]) {
+               __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+               if (id >= VXLAN_VID_MASK)
+                       return -ERANGE;
+       }
+
+       if (data[IFLA_VXLAN_GROUP]) {
+               __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+               if (!IN_MULTICAST(ntohl(gaddr))) {
+                       pr_debug("group address is not IPv4 multicast\n");
+                       return -EADDRNOTAVAIL;
+               }
+       }
+       return 0;
+}
+
+/* Create a new vxlan device from netlink attributes (rtnl_link_ops
+ * .newlink).  Attribute types were checked by vxlan_policy and
+ * vxlan_validate; copy the values into the private area, register the
+ * device and link it into the per-namespace VNI hash.
+ */
+static int vxlan_newlink(struct net *net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[])
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       __u32 vni;
+       int err;
+
+       if (!data[IFLA_VXLAN_ID])
+               return -EINVAL;
+
+       vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+       if (vxlan_find_vni(net, vni)) {
+               pr_info("duplicate VNI %u\n", vni);
+               return -EEXIST;
+       }
+       vxlan->vni = vni;
+
+       if (data[IFLA_VXLAN_GROUP])
+               vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+
+       if (data[IFLA_VXLAN_LOCAL])
+               vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+
+       if (data[IFLA_VXLAN_LINK]) {
+               vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
+               if (!tb[IFLA_MTU]) {
+                       struct net_device *lowerdev;
+
+                       /* The ifindex comes from userspace and may not
+                        * refer to an existing device; fail cleanly
+                        * instead of dereferencing a NULL pointer.
+                        */
+                       lowerdev = __dev_get_by_index(net, vxlan->link);
+                       if (!lowerdev) {
+                               pr_info("ifindex %d does not exist\n",
+                                       vxlan->link);
+                               return -ENODEV;
+                       }
+                       dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+               }
+       }
+
+       /* TTL is declared in vxlan_policy and reported by
+        * vxlan_fill_info; without this the attribute was silently
+        * ignored and vxlan->ttl stayed 0 (inherit route hoplimit).
+        */
+       if (data[IFLA_VXLAN_TTL])
+               vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+
+       if (data[IFLA_VXLAN_TOS])
+               vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+
+       /* Learning defaults to on when the attribute is absent */
+       if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
+               vxlan->learn = true;
+
+       if (data[IFLA_VXLAN_AGEING])
+               vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+       else
+               vxlan->age_interval = FDB_AGE_DEFAULT;
+
+       if (data[IFLA_VXLAN_LIMIT])
+               vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+       err = register_netdevice(dev);
+       if (!err)
+               hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
+
+       return err;
+}
+
+/* Tear down a vxlan device (rtnl_link_ops .dellink): unhook it from
+ * the VNI hash, then queue the netdev for unregistration.
+ */
+static void vxlan_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       hlist_del_rcu(&vxlan->hlist);
+
+       unregister_netdevice_queue(dev, head);
+}
+
+/* Worst-case netlink message size for vxlan_fill_info;
+ * must stay in sync with the attributes emitted there.
+ */
+static size_t vxlan_get_size(const struct net_device *dev)
+{
+
+       return nla_total_size(sizeof(__u32)) +  /* IFLA_VXLAN_ID */
+               nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
+               nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
+               nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
+               nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
+               nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
+               0;
+}
+
+/* Dump device configuration back to userspace (rtnl_link_ops
+ * .fill_info).  Optional attributes are only emitted when set.
+ * NOTE(review): gaddr/saddr are __be32 but written with nla_put_u32
+ * rather than nla_put_be32 — same bytes on the wire, but sparse will
+ * complain; confirm.
+ */
+static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       const struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
+               goto nla_put_failure;
+
+       if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
+               goto nla_put_failure;
+
+       if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
+               goto nla_put_failure;
+
+       if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
+               goto nla_put_failure;
+
+       if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+           nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
+           nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
+           nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* rtnetlink glue: makes "ip link add ... type vxlan" work */
+static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
+       .kind           = "vxlan",
+       .maxtype        = IFLA_VXLAN_MAX,
+       .policy         = vxlan_policy,
+       .priv_size      = sizeof(struct vxlan_dev),
+       .setup          = vxlan_setup,
+       .validate       = vxlan_validate,
+       .newlink        = vxlan_newlink,
+       .dellink        = vxlan_dellink,
+       .get_size       = vxlan_get_size,
+       .fill_info      = vxlan_fill_info,
+};
+
+/* Per-namespace setup: create and bind the shared UDP encapsulation
+ * socket and initialize the VNI hash table.  The socket is created in
+ * the initial namespace by sock_create_kern and then moved into the
+ * target namespace with sk_change_net.
+ */
+static __net_init int vxlan_init_net(struct net *net)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct sock *sk;
+       struct sockaddr_in vxlan_addr = {
+               .sin_family = AF_INET,
+               .sin_addr.s_addr = htonl(INADDR_ANY),
+       };
+       int rc;
+       unsigned h;
+
+       /* Create UDP socket for encapsulation receive. */
+       rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
+       if (rc < 0) {
+               pr_debug("UDP socket create failed\n");
+               return rc;
+       }
+       /* Put in proper namespace */
+       sk = vn->sock->sk;
+       sk_change_net(sk, net);
+
+       vxlan_addr.sin_port = htons(vxlan_port);
+
+       rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
+                        sizeof(vxlan_addr));
+       if (rc < 0) {
+               pr_debug("bind for UDP socket %pI4:%u (%d)\n",
+                        &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
+               sk_release_kernel(sk);
+               vn->sock = NULL;
+               return rc;
+       }
+
+       /* Disable multicast loopback */
+       inet_sk(sk)->mc_loop = 0;
+
+       /* Mark socket as an encapsulation socket. */
+       udp_sk(sk)->encap_type = 1;
+       udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
+       udp_encap_enable();
+
+       for (h = 0; h < VNI_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&vn->vni_list[h]);
+
+       return 0;
+}
+
+/* Per-namespace teardown: release the encapsulation socket.
+ * vn->sock may be NULL if vxlan_init_net failed after creation.
+ */
+static __net_exit void vxlan_exit_net(struct net *net)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+       if (vn->sock) {
+               sk_release_kernel(vn->sock->sk);
+               vn->sock = NULL;
+       }
+}
+
+/* Network-namespace hooks; .size makes the core allocate a
+ * struct vxlan_net per namespace, keyed by vxlan_net_id.
+ */
+static struct pernet_operations vxlan_net_ops = {
+       .init = vxlan_init_net,
+       .exit = vxlan_exit_net,
+       .id   = &vxlan_net_id,
+       .size = sizeof(struct vxlan_net),
+};
+
+/* Module entry point: seed the FDB hash salt, then register the
+ * per-namespace state and the rtnetlink link type, unwinding on error.
+ */
+static int __init vxlan_init_module(void)
+{
+       int rc;
+
+       get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
+
+       rc = register_pernet_device(&vxlan_net_ops);
+       if (rc)
+               goto out1;
+
+       rc = rtnl_link_register(&vxlan_link_ops);
+       if (rc)
+               goto out2;
+
+       return 0;
+
+out2:
+       unregister_pernet_device(&vxlan_net_ops);
+out1:
+       return rc;
+}
+module_init(vxlan_init_module);
+
+/* Module exit: unregister in reverse order of vxlan_init_module */
+static void __exit vxlan_cleanup_module(void)
+{
+       rtnl_link_unregister(&vxlan_link_ops);
+       unregister_pernet_device(&vxlan_net_ops);
+}
+module_exit(vxlan_cleanup_module);
+
+/* Module metadata */
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VXLAN_VERSION);
+MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
+MODULE_ALIAS_RTNL_LINK("vxlan");
index 025426132754a4f584f3cd7d5eda4362c176bab8..9c34d2fccfac61508705a4021f436e9a9024e936 100644 (file)
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
        struct sk_buff *skb;
        const struct i2400m_tlv_detailed_device_info *ddi;
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-       const unsigned char zeromac[ETH_ALEN] = { 0 };
 
        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
                 "to that of boot mode's\n");
        dev_warn(dev, "device reports     %pM\n", ddi->mac_address);
        dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
-       if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+       if (is_zero_ether_addr(ddi->mac_address))
                dev_err(dev, "device reports an invalid MAC address, "
                        "not updating\n");
        else {
index 689a71c1af71b049e5f9cfa9ecfec806a4f678f3..154a4965be4fd9922e62684d73467b87fa46194e 100644 (file)
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct adm8211_tx_hdr *txhdr;
        size_t payload_len, hdrlen;
index c586f78c307ff82e8f4e2b2eec6a6d7035547c80..3cd05a7173f6ce37945c04237d3dc493aa080647 100644 (file)
@@ -87,7 +87,6 @@ static struct pci_driver airo_driver = {
 /* Include Wireless Extension definition and check version - Jean II */
 #include <linux/wireless.h>
 #define WIRELESS_SPY           /* enable iwspy support */
-#include <net/iw_handler.h>    /* New driver API */
 
 #define CISCO_EXT              /* enable Cisco extensions */
 #ifdef CISCO_EXT
@@ -5984,13 +5983,11 @@ static int airo_set_wap(struct net_device *dev,
        Cmd cmd;
        Resp rsp;
        APListRid APList_rid;
-       static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-       static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
        if (awrq->sa_family != ARPHRD_ETHER)
                return -EINVAL;
-       else if (!memcmp(any, awrq->sa_data, ETH_ALEN) ||
-                !memcmp(off, awrq->sa_data, ETH_ALEN)) {
+       else if (is_broadcast_ether_addr(awrq->sa_data) ||
+                is_zero_ether_addr(awrq->sa_data)) {
                memset(&cmd, 0, sizeof(cmd));
                cmd.cmd=CMD_LOSE_SYNC;
                if (down_interruptible(&local->sem))
index 88b8d64c90f1b49a302deaca3af07788ee4b27c1..99b9ddf21273b2244b2a8f9e03847a68a736bb61 100644 (file)
@@ -498,36 +498,6 @@ exit:
        return ret;
 }
 
-#define HEX2STR_BUFFERS 4
-#define HEX2STR_MAX_LEN 64
-
-/* Convert binary data into hex string */
-static char *hex2str(void *buf, size_t len)
-{
-       static atomic_t a = ATOMIC_INIT(0);
-       static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1];
-       char *ret = bufs[atomic_inc_return(&a) & (HEX2STR_BUFFERS - 1)];
-       char *obuf = ret;
-       u8 *ibuf = buf;
-
-       if (len > HEX2STR_MAX_LEN)
-               len = HEX2STR_MAX_LEN;
-
-       if (len == 0)
-               goto exit;
-
-       while (len--) {
-               obuf = hex_byte_pack(obuf, *ibuf++);
-               *obuf++ = '-';
-       }
-       obuf--;
-
-exit:
-       *obuf = '\0';
-
-       return ret;
-}
-
 /* LED trigger */
 static int tx_activity;
 static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -1004,9 +974,9 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
            WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
 
        for (i = 0; i < WEP_KEYS; i++)
-               at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s",
+               at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %*phD",
                         wiphy_name(priv->hw->wiphy), i,
-                        hex2str(m->wep_default_keyvalue[i], key_len));
+                        key_len, m->wep_default_keyvalue[i]);
 exit:
        kfree(m);
 }
@@ -1031,7 +1001,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
        at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
                 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
                 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
-                "current_bssid %pM current_essid %s current_bss_type %d "
+                "current_bssid %pM current_essid %*phD current_bss_type %d "
                 "pm_mode %d ibss_change %d res %d "
                 "multi_domain_capability_implemented %d "
                 "international_roaming %d country_string %.3s",
@@ -1041,7 +1011,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
                 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
                 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
                 m->CFP_period, m->current_bssid,
-                hex2str(m->current_essid, IW_ESSID_MAX_SIZE),
+                IW_ESSID_MAX_SIZE, m->current_essid,
                 m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
                 m->res, m->multi_domain_capability_implemented,
                 m->multi_domain_capability_enabled, m->country_string);
@@ -1069,7 +1039,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
                 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
                 "scan_type %d scan_channel %d probe_delay %u "
                 "min_channel_time %d max_channel_time %d listen_int %d "
-                "desired_ssid %s desired_bssid %pM desired_bsstype %d",
+                "desired_ssid %*phD desired_bssid %pM desired_bsstype %d",
                 wiphy_name(priv->hw->wiphy),
                 le32_to_cpu(m->max_tx_msdu_lifetime),
                 le32_to_cpu(m->max_rx_lifetime),
@@ -1080,7 +1050,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
                 le16_to_cpu(m->min_channel_time),
                 le16_to_cpu(m->max_channel_time),
                 le16_to_cpu(m->listen_interval),
-                hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE),
+                IW_ESSID_MAX_SIZE, m->desired_ssid,
                 m->desired_bssid, m->desired_bsstype);
 exit:
        kfree(m);
@@ -1160,13 +1130,13 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
                goto exit;
        }
 
-       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s",
+       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %*phD",
                 wiphy_name(priv->hw->wiphy),
-                hex2str(m->channel_list, sizeof(m->channel_list)));
+                (int)sizeof(m->channel_list), m->channel_list);
 
-       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s",
+       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %*phD",
                 wiphy_name(priv->hw->wiphy),
-                hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel)));
+                (int)sizeof(m->tx_powerlevel), m->tx_powerlevel);
 exit:
        kfree(m);
 }
@@ -1369,9 +1339,9 @@ static int at76_startup_device(struct at76_priv *priv)
        int ret;
 
        at76_dbg(DBG_PARAMS,
-                "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d "
+                "%s param: ssid %.*s (%*phD) mode %s ch %d wep %s key %d "
                 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
-                priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE),
+                priv->essid, IW_ESSID_MAX_SIZE, priv->essid,
                 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
                 priv->channel, priv->wep_enabled ? "enabled" : "disabled",
                 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
@@ -1726,7 +1696,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
        ieee80211_wake_queues(priv->hw);
 }
 
-static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw,
+                            struct ieee80211_tx_control *control,
+                            struct sk_buff *skb)
 {
        struct at76_priv *priv = hw->priv;
        struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
index 6169fbd23ed10a527e26973c2e00f2bfe6bd82af..4521342c62cc37654ee1889b9ee177e395706f27 100644 (file)
@@ -159,6 +159,7 @@ struct ath_common {
 
        bool btcoex_enabled;
        bool disable_ani;
+       bool antenna_diversity;
 };
 
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
index 64a453a6dfe442d22c533435787df147b86101fe..3150def17193b72652bb068c8fde9ca99e0a6e5f 100644 (file)
@@ -1331,7 +1331,6 @@ struct ath5k_hw {
        unsigned int            nexttbtt;       /* next beacon time in TU */
        struct ath5k_txq        *cabq;          /* content after beacon */
 
-       int                     power_level;    /* Requested tx power in dBm */
        bool                    assoc;          /* associate state */
        bool                    enable_beacon;  /* true if beacons are on */
 
@@ -1425,6 +1424,7 @@ struct ath5k_hw {
                /* Value in dB units */
                s16             txp_cck_ofdm_pwr_delta;
                bool            txp_setup;
+               int             txp_requested;  /* Requested tx power in dBm */
        } ah_txpower;
 
        struct ath5k_nfcal_hist ah_nfcal_hist;
index 2aab20ee9f387f8c89289ca409e6ead0fa83722c..9fd6d9a9942ec298b81be9a8e62697df1d13c037 100644 (file)
@@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
                ieee80211_get_hdrlen_from_skb(skb), padsize,
                get_hw_packet_type(skb),
-               (ah->power_level * 2),
+               (ah->ah_txpower.txp_requested * 2),
                hw_rate,
                info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
                cts_rate, duration);
@@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
        ds->ds_data = bf->skbaddr;
        ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
                        ieee80211_get_hdrlen_from_skb(skb), padsize,
-                       AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
+                       AR5K_PKT_TYPE_BEACON,
+                       (ah->ah_txpower.txp_requested * 2),
                        ieee80211_get_tx_rate(ah->hw, info)->hw_value,
                        1, AR5K_TXKEYIX_INVALID,
                        antenna, flags, 0, 0);
@@ -2445,6 +2446,7 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                        IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                        IEEE80211_HW_SIGNAL_DBM |
+                       IEEE80211_HW_MFP_CAPABLE |
                        IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
        hw->wiphy->interface_modes =
index d56453e43d7e353e0890d3962e264448d7185771..7a28538e6e05ba6c001e9b04aa913955a339ac44 100644 (file)
@@ -55,7 +55,8 @@
 \********************/
 
 static void
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+        struct sk_buff *skb)
 {
        struct ath5k_hw *ah = hw->priv;
        u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
        }
 
        if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
-       (ah->power_level != conf->power_level)) {
-               ah->power_level = conf->power_level;
+       (ah->ah_txpower.txp_requested != conf->power_level)) {
+               ah->ah_txpower.txp_requested = conf->power_level;
 
                /* Half dB steps */
                ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -488,6 +489,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        if (ath5k_modparam_nohwcrypt)
                return -EOPNOTSUPP;
 
+       if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
+               return -EOPNOTSUPP;
+
        if (vif->type == NL80211_IFTYPE_ADHOC &&
            (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
             key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
@@ -522,7 +526,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                        if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
                                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
                        if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
-                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
                break;
index 8b71a2d947e0c9348c1e1b402b4d6092e4d0b587..ab363f34b4df71c76f1fa9198c245e0bac28da7a 100644 (file)
@@ -1975,11 +1975,13 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
                        spur_delta_phase = (spur_offset << 18) / 25;
                        spur_freq_sigma_delta = (spur_delta_phase >> 10);
                        symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
+                       break;
                case AR5K_BWMODE_5MHZ:
                        /* Both sample_freq and chip_freq are 10MHz (?) */
                        spur_delta_phase = (spur_offset << 19) / 25;
                        spur_freq_sigma_delta = (spur_delta_phase >> 10);
                        symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
+                       break;
                default:
                        if (channel->band == IEEE80211_BAND_5GHZ) {
                                /* Both sample_freq and chip_freq are 40MHz */
@@ -3516,6 +3518,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 {
        unsigned int i;
        u16 *rates;
+       s16 rate_idx_scaled = 0;
 
        /* max_pwr is power level we got from driver/user in 0.5dB
         * units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3565,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
                for (i = 8; i <= 15; i++)
                        rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
 
+       /* Save min/max and current tx power for this channel
+        * in 0.25dB units.
+        *
+        * Note: We use rates[0] for current tx power because
+        * it covers most of the rates, in most cases. It's our
+        * tx power limit and what the user expects to see. */
+       ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+       ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
+
+       /* Set max txpower for correct OFDM operation on all rates
+        * -that is the txpower for 54Mbit-, it's used for the PAPD
+        * gain probe and it's in 0.5dB units */
+       ah->ah_txpower.txp_ofdm = rates[7];
+
        /* Now that we have all rates setup use table offset to
         * match the power range set by user with the power indices
         * on PCDAC/PDADC table */
        for (i = 0; i < 16; i++) {
-               rates[i] += ah->ah_txpower.txp_offset;
+               rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
                /* Don't get out of bounds */
-               if (rates[i] > 63)
-                       rates[i] = 63;
+               if (rate_idx_scaled > 63)
+                       rate_idx_scaled = 63;
+               if (rate_idx_scaled < 0)
+                       rate_idx_scaled = 0;
+               rates[i] = rate_idx_scaled;
        }
-
-       /* Min/max in 0.25dB units */
-       ah->ah_txpower.txp_min_pwr = 2 * rates[7];
-       ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
-       ah->ah_txpower.txp_ofdm = rates[7];
 }
 
 
@@ -3639,10 +3654,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        if (!ah->ah_txpower.txp_setup ||
            (channel->hw_value != curr_channel->hw_value) ||
            (channel->center_freq != curr_channel->center_freq)) {
-               /* Reset TX power values */
+               /* Reset TX power values but preserve requested
+                * tx power from above */
+               int requested_txpower = ah->ah_txpower.txp_requested;
+
                memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+
+               /* Restore TPC setting and requested tx power */
                ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
 
+               ah->ah_txpower.txp_requested = requested_txpower;
+
                /* Calculate the powertable */
                ret = ath5k_setup_channel_powertable(ah, channel,
                                                        ee_mode, type);
@@ -3789,8 +3811,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
         * RF buffer settings on 5211/5212+ so that we
         * properly set curve indices.
         */
-       ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
-                       ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
+       ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
+                                       ah->ah_txpower.txp_requested * 2 :
+                                       AR5K_TUNE_MAX_TXPOWER);
        if (ret)
                return ret;
 
index 86aeef4b9d7ee9295fe04c5533d3959a19474c9c..7089f8160ad5bb7f2a7377bbc0adae68229274b2 100644 (file)
@@ -1488,7 +1488,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
 }
 
 static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
-                                                     char *name,
+                                                     const char *name,
                                                      enum nl80211_iftype type,
                                                      u32 *flags,
                                                      struct vif_params *params)
@@ -3477,7 +3477,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
        ar->num_vif--;
 }
 
-struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
                                          enum nl80211_iftype type,
                                          u8 fw_vif_idx, u8 nw_type)
 {
index 56b1ebe79812d0d90b2fc6292592809fae9637c1..780f77775a9152ca078922cbd2a7754c9a019d24 100644 (file)
@@ -25,7 +25,7 @@ enum ath6kl_cfg_suspend_mode {
        ATH6KL_CFG_SUSPEND_SCHED_SCAN,
 };
 
-struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
                                          enum nl80211_iftype type,
                                          u8 fw_vif_idx, u8 nw_type);
 void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
index ff007f500feba8176794ca200e2bfd0523d657ff..e09ec40ce71ab6c25bd801a0661c61f446496aed 100644 (file)
@@ -237,7 +237,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
                                     entry_cck->fir_step_level);
 
        /* Skip MRC CCK for pre AR9003 families */
-       if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
+       if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
                return;
 
        if (aniState->mrcCCK != entry_cck->mrc_cck_on)
index bbcfeb3b2a60ac90d046110d59161dd118b15e4a..664844c5d3d51ae8752514bb3976fa2074ba0afe 100644 (file)
@@ -311,6 +311,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                          struct ath_ant_comb *antcomb,
                                          int alt_ratio)
 {
+       ant_conf->main_gaintb = 0;
+       ant_conf->alt_gaintb = 0;
+
        if (ant_conf->div_group == 0) {
                /* Adjust the fast_div_bias based on main and alt lna conf */
                switch ((ant_conf->main_lna_conf << 4) |
@@ -360,18 +363,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                        ant_conf->alt_lna_conf) {
                case 0x01: /* A-B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x02: /* A-B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x03: /* A-B A+B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x10: /* LNA2 A-B */
                        if (!(antcomb->scan) &&
@@ -379,13 +376,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x12: /* LNA2 LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x13: /* LNA2 A+B */
                        if (!(antcomb->scan) &&
@@ -393,8 +386,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x20: /* LNA1 A-B */
                        if (!(antcomb->scan) &&
@@ -402,13 +393,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x21: /* LNA1 LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x23: /* LNA1 A+B */
                        if (!(antcomb->scan) &&
@@ -416,23 +403,15 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x30: /* A+B A-B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x31: /* A+B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x32: /* A+B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                default:
                        break;
@@ -443,18 +422,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->alt_lna_conf) {
                case 0x01: /* A-B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x02: /* A-B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x03: /* A-B A+B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x10: /* LNA2 A-B */
                        if (!(antcomb->scan) &&
@@ -462,13 +435,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x12: /* LNA2 LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x13: /* LNA2 A+B */
                        if (!(antcomb->scan) &&
@@ -476,8 +445,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x20: /* LNA1 A-B */
                        if (!(antcomb->scan) &&
@@ -485,13 +452,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x21: /* LNA1 LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x23: /* LNA1 A+B */
                        if (!(antcomb->scan) &&
@@ -499,23 +462,77 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x30: /* A+B A-B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x31: /* A+B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x32: /* A+B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
+                       break;
+               default:
+                       break;
+               }
+       } else if (ant_conf->div_group == 3) {
+               switch ((ant_conf->main_lna_conf << 4) |
+                       ant_conf->alt_lna_conf) {
+               case 0x01: /* A-B LNA2 */
+                       ant_conf->fast_div_bias = 0x1;
+                       break;
+               case 0x02: /* A-B LNA1 */
+                       ant_conf->fast_div_bias = 0x39;
+                       break;
+               case 0x03: /* A-B A+B */
+                       ant_conf->fast_div_bias = 0x1;
+                       break;
+               case 0x10: /* LNA2 A-B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x1;
+                       }
+                       break;
+               case 0x12: /* LNA2 LNA1 */
+                       ant_conf->fast_div_bias = 0x39;
+                       break;
+               case 0x13: /* LNA2 A+B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x1;
+                       }
+                       break;
+               case 0x20: /* LNA1 A-B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x4;
+                       }
+                       break;
+               case 0x21: /* LNA1 LNA2 */
+                       ant_conf->fast_div_bias = 0x6;
+                       break;
+               case 0x23: /* LNA1 A+B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x6;
+                       }
+                       break;
+               case 0x30: /* A+B A-B */
+                       ant_conf->fast_div_bias = 0x1;
+                       break;
+               case 0x31: /* A+B LNA2 */
+                       ant_conf->fast_div_bias = 0x6;
+                       break;
+               case 0x32: /* A+B LNA1 */
+                       ant_conf->fast_div_bias = 0x1;
                        break;
                default:
                        break;
@@ -759,6 +776,7 @@ div_comb_done:
 void ath_ant_comb_update(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_hw_antcomb_conf div_ant_conf;
        u8 lna_conf;
 
@@ -773,4 +791,7 @@ void ath_ant_comb_update(struct ath_softc *sc)
        div_ant_conf.alt_lna_conf = lna_conf;
 
        ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+
+       if (common->antenna_diversity)
+               ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
 }
index d066f2516e4753617aa55f1522427eb96f4a1115..5bbe5057ba18ae35408c2aa78dfbdf4d5cefdf0f 100644 (file)
@@ -138,7 +138,8 @@ static const struct ar9300_eeprom ar9300_default = {
         },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -713,7 +714,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
         },
         .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
         },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -1289,7 +1291,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
        },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -1865,7 +1868,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
        },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -2440,7 +2444,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
         },
         .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
         },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -3524,7 +3529,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
 
        if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
                REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
-       else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
+       else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
                REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
        else {
                REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3561,9 +3566,9 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
 
 static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 {
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
        int chain;
        u32 regval;
-       u32 ant_div_ctl1;
        static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
                        AR_PHY_SWITCH_CHAIN_0,
                        AR_PHY_SWITCH_CHAIN_1,
@@ -3572,7 +3577,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 
        u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
                                AR_SWITCH_TABLE_COM_AR9462_ALL, value);
        } else if (AR_SREV_9550(ah)) {
@@ -3616,7 +3621,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                }
        }
 
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
                /*
                 * main_lnaconf, alt_lnaconf, main_tb, alt_tb
@@ -3626,41 +3631,44 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                regval &= (~AR_ANT_DIV_CTRL_ALL);
                regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
                /* enable_lnadiv */
-               regval &= (~AR_PHY_9485_ANT_DIV_LNADIV);
-               regval |= ((value >> 6) & 0x1) <<
-                               AR_PHY_9485_ANT_DIV_LNADIV_S;
+               regval &= (~AR_PHY_ANT_DIV_LNADIV);
+               regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+               if (AR_SREV_9565(ah)) {
+                       if (ah->shared_chain_lnadiv) {
+                               regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+                       } else {
+                               regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
+                               regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+                       }
+               }
+
                REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 
                /*enable fast_div */
                regval = REG_READ(ah, AR_PHY_CCK_DETECT);
                regval &= (~AR_FAST_DIV_ENABLE);
-               regval |= ((value >> 7) & 0x1) <<
-                               AR_FAST_DIV_ENABLE_S;
+               regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
                REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
-               ant_div_ctl1 =
-                       ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
-               /* check whether antenna diversity is enabled */
-               if ((ant_div_ctl1 >> 0x6) == 0x3) {
+
+               if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
                        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
                        /*
                         * clear bits 25-30 main_lnaconf, alt_lnaconf,
                         * main_tb, alt_tb
                         */
-                       regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
-                                       AR_PHY_9485_ANT_DIV_ALT_LNACONF |
-                                       AR_PHY_9485_ANT_DIV_ALT_GAINTB |
-                                       AR_PHY_9485_ANT_DIV_MAIN_GAINTB));
+                       regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                                    AR_PHY_ANT_DIV_ALT_LNACONF |
+                                    AR_PHY_ANT_DIV_ALT_GAINTB |
+                                    AR_PHY_ANT_DIV_MAIN_GAINTB));
                        /* by default use LNA1 for the main antenna */
-                       regval |= (AR_PHY_9485_ANT_DIV_LNA1 <<
-                                       AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S);
-                       regval |= (AR_PHY_9485_ANT_DIV_LNA2 <<
-                                       AR_PHY_9485_ANT_DIV_ALT_LNACONF_S);
+                       regval |= (AR_PHY_ANT_DIV_LNA1 <<
+                                  AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+                       regval |= (AR_PHY_ANT_DIV_LNA2 <<
+                                  AR_PHY_ANT_DIV_ALT_LNACONF_S);
                        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
                }
-
-
        }
-
 }
 
 static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -3847,7 +3855,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
                        REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
                        if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
                                return;
-               } else if (AR_SREV_9462(ah)) {
+               } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                        reg_val = le32_to_cpu(pBase->swreg);
                        REG_WRITE(ah, AR_PHY_PMU1, reg_val);
                } else {
@@ -3878,7 +3886,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
                        while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
                                                AR_PHY_PMU2_PGM))
                                udelay(10);
-               } else if (AR_SREV_9462(ah))
+               } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                        REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
                else {
                        reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
@@ -3981,6 +3989,62 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
                      bias & 0x3);
 }
 
+static int ar9003_hw_get_thermometer(struct ath_hw *ah)
+{
+       struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+       struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+       int thermometer =  (pBase->miscConfiguration >> 1) & 0x3;
+
+       return --thermometer;
+}
+
+static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
+{
+       int thermometer = ar9003_hw_get_thermometer(ah);
+       u8 therm_on = (thermometer < 0) ? 0 : 1;
+
+       REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+                     AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+       if (ah->caps.tx_chainmask & BIT(1))
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+       if (ah->caps.tx_chainmask & BIT(2))
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+
+       therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+       REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+                     AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+       if (ah->caps.tx_chainmask & BIT(1)) {
+               therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+       }
+       if (ah->caps.tx_chainmask & BIT(2)) {
+               therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+       }
+}
+
+static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
+{
+       u32 data, ko, kg;
+
+       if (!AR_SREV_9462_20(ah))
+               return;
+       ar9300_otp_read_word(ah, 1, &data);
+       ko = data & 0xff;
+       kg = (data >> 8) & 0xff;
+       if (ko || kg) {
+               REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+                             AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko);
+               REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+                             AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN,
+                             kg + 256);
+       }
+}
+
 static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
                                             struct ath9k_channel *chan)
 {
@@ -3996,6 +4060,8 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
                ar9003_hw_internal_regulator_apply(ah);
        ar9003_hw_apply_tuning_caps(ah);
        ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
+       ar9003_hw_thermometer_apply(ah);
+       ar9003_hw_thermo_cal_apply(ah);
 }
 
 static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4532,7 +4598,7 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
 {
        int tempSlope = 0;
        struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-       int f[3], t[3];
+       int f[8], t[8], i;
 
        REG_RMW(ah, AR_PHY_TPC_11_B0,
                (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4565,7 +4631,14 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
         */
        if (frequency < 4000)
                tempSlope = eep->modalHeader2G.tempSlope;
-       else if (eep->base_ext2.tempSlopeLow != 0) {
+       else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
+               for (i = 0; i < 8; i++) {
+                       t[i] = eep->base_ext1.tempslopextension[i];
+                       f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+               }
+               tempSlope = ar9003_hw_power_interpolate((s32) frequency,
+                                                       f, t, 8);
+       } else if (eep->base_ext2.tempSlopeLow != 0) {
                t[0] = eep->base_ext2.tempSlopeLow;
                f[0] = 5180;
                t[1] = eep->modalHeader5G.tempSlope;
@@ -4905,90 +4978,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
                                i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
                                chan->channel);
 
-                               /*
-                                * compare test group from regulatory
-                                * channel list with test mode from pCtlMode
-                                * list
-                                */
-                               if ((((cfgCtl & ~CTL_MODE_M) |
-                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-                                       ctlIndex[i]) ||
-                                   (((cfgCtl & ~CTL_MODE_M) |
-                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-                                    ((ctlIndex[i] & CTL_MODE_M) |
-                                      SD_NO_CTL))) {
-                                       twiceMinEdgePower =
-                                         ar9003_hw_get_max_edge_power(pEepData,
-                                                                      freq, i,
-                                                                      is2ghz);
-
-                                       if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
-                                               /*
-                                                * Find the minimum of all CTL
-                                                * edge powers that apply to
-                                                * this channel
-                                                */
-                                               twiceMaxEdgePower =
-                                                       min(twiceMaxEdgePower,
-                                                           twiceMinEdgePower);
-                                               else {
-                                                       /* specific */
-                                                       twiceMaxEdgePower =
-                                                         twiceMinEdgePower;
-                                                       break;
-                                               }
+                       /*
+                        * compare test group from regulatory
+                        * channel list with test mode from pCtlMode
+                        * list
+                        */
+                       if ((((cfgCtl & ~CTL_MODE_M) |
+                              (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                               ctlIndex[i]) ||
+                           (((cfgCtl & ~CTL_MODE_M) |
+                              (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                            ((ctlIndex[i] & CTL_MODE_M) |
+                              SD_NO_CTL))) {
+                               twiceMinEdgePower =
+                                 ar9003_hw_get_max_edge_power(pEepData,
+                                                              freq, i,
+                                                              is2ghz);
+
+                               if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+                                       /*
+                                        * Find the minimum of all CTL
+                                        * edge powers that apply to
+                                        * this channel
+                                        */
+                                       twiceMaxEdgePower =
+                                               min(twiceMaxEdgePower,
+                                                   twiceMinEdgePower);
+                               else {
+                                       /* specific */
+                                       twiceMaxEdgePower = twiceMinEdgePower;
+                                       break;
                                }
                        }
+               }
 
-                       minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+               minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
 
-                       ath_dbg(common, REGULATORY,
-                               "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
-                               ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
-                               scaledPower, minCtlPower);
-
-                       /* Apply ctl mode to correct target power set */
-                       switch (pCtlMode[ctlMode]) {
-                       case CTL_11B:
-                               for (i = ALL_TARGET_LEGACY_1L_5L;
-                                    i <= ALL_TARGET_LEGACY_11S; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       case CTL_11A:
-                       case CTL_11G:
-                               for (i = ALL_TARGET_LEGACY_6_24;
-                                    i <= ALL_TARGET_LEGACY_54; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       case CTL_5GHT20:
-                       case CTL_2GHT20:
-                               for (i = ALL_TARGET_HT20_0_8_16;
-                                    i <= ALL_TARGET_HT20_21; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               pPwrArray[ALL_TARGET_HT20_22] =
-                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
-                                         minCtlPower);
-                               pPwrArray[ALL_TARGET_HT20_23] =
-                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
-                                          minCtlPower);
-                               break;
-                       case CTL_5GHT40:
-                       case CTL_2GHT40:
-                               for (i = ALL_TARGET_HT40_0_8_16;
-                                    i <= ALL_TARGET_HT40_23; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       default:
-                           break;
-                       }
+               ath_dbg(common, REGULATORY,
+                       "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+                       ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+                       scaledPower, minCtlPower);
+
+               /* Apply ctl mode to correct target power set */
+               switch (pCtlMode[ctlMode]) {
+               case CTL_11B:
+                       for (i = ALL_TARGET_LEGACY_1L_5L;
+                            i <= ALL_TARGET_LEGACY_11S; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_11A:
+               case CTL_11G:
+                       for (i = ALL_TARGET_LEGACY_6_24;
+                            i <= ALL_TARGET_LEGACY_54; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_5GHT20:
+               case CTL_2GHT20:
+                       for (i = ALL_TARGET_HT20_0_8_16;
+                            i <= ALL_TARGET_HT20_23; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_5GHT40:
+               case CTL_2GHT40:
+                       for (i = ALL_TARGET_HT40_0_8_16;
+                            i <= ALL_TARGET_HT40_23; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               default:
+                       break;
+               }
        } /* end ctl mode checking */
 }
 
index 3a1ff55bceb9011eac0610e78e42c4e5e4193f6e..41b1a75e6bec7c120264f526ccd1426cca4828a3 100644 (file)
@@ -267,7 +267,8 @@ struct cal_ctl_data_5g {
 
 struct ar9300_BaseExtension_1 {
        u8 ant_div_control;
-       u8 future[11];
+       u8 future[3];
+       u8 tempslopextension[8];
        int8_t quick_drop_low;
        int8_t quick_drop_high;
 } __packed;
index 1e8a4da5952f5217866765c52892e4d0a03555d8..1a36fa26263966e34bc6b952ffc64a0426387b11 100644 (file)
@@ -24,6 +24,7 @@
 #include "ar955x_1p0_initvals.h"
 #include "ar9580_1p0_initvals.h"
 #include "ar9462_2p0_initvals.h"
+#include "ar9565_1p0_initvals.h"
 
 /* General hardware code for the AR9003 hadware family */
 
  */
 static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 {
-#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
-               ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
-
 #define AR9462_BB_CTX_COEFJ(x) \
                ar9462_##x##_baseband_core_txfir_coeff_japan_2484
 
 #define AR9462_BBC_TXIFR_COEFFJ \
                ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
        if (AR_SREV_9330_11(ah)) {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -220,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
                /* Awake -> Sleep Setting */
                INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                               PCIE_PLL_ON_CREQ_DIS_L1_2P0);
+                              ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
                /* Sleep -> Awake Setting */
                INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                               PCIE_PLL_ON_CREQ_DIS_L1_2P0);
+                              ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
 
                /* Fast clock modal settings */
                INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -302,6 +301,39 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9580_1p0_modes_fast_clock);
+       } else if (AR_SREV_9565(ah)) {
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+                              ar9565_1p0_mac_core);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+                              ar9565_1p0_mac_postamble);
+
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                              ar9565_1p0_baseband_core);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                              ar9565_1p0_baseband_postamble);
+
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+                              ar9565_1p0_radio_core);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+                              ar9565_1p0_radio_postamble);
+
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+                              ar9565_1p0_soc_preamble);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+                              ar9565_1p0_soc_postamble);
+
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9565_1p0_Common_rx_gain_table);
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
+
+               INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                              ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                              ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+
+               INIT_INI_ARRAY(&ah->iniModesFastClock,
+                               ar9565_1p0_modes_fast_clock);
        } else {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -374,6 +406,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9462_modes_low_ob_db_tx_gain_table_2p0);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_low_ob_db_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
@@ -402,6 +437,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9462_modes_high_ob_db_tx_gain_table_2p0);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_high_ob_db_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_high_ob_db_tx_gain_table_2p2);
@@ -424,6 +462,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
        else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9580_1p0_low_ob_db_tx_gain_table);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_low_ob_db_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_low_ob_db_tx_gain_table_2p2);
@@ -446,6 +487,9 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
        else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9580_1p0_high_power_tx_gain_table);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_high_power_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_high_power_tx_gain_table_2p2);
@@ -538,6 +582,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
        } else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                        ar9580_1p0_wo_xlna_rx_gain_table);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9565_1p0_common_wo_xlna_rx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                        ar9300Common_wo_xlna_rx_gain_table_2p2);
index 78816b8b2173cf8e238d56e1fcf981bf027ff009..301bf72c53bf5ce3de62825d87f025b0a5fb82e5 100644 (file)
@@ -31,7 +31,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
        u32 val, ctl12, ctl17;
        u8 desc_len;
 
-       desc_len = (AR_SREV_9462(ah) ? 0x18 : 0x17);
+       desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);
 
        val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
              (1 << AR_TxRxDesc_S) |
@@ -182,6 +182,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
+       bool fatal_int;
 
        if (ath9k_hw_mci_is_enabled(ah))
                async_mask |= AR_INTR_ASYNC_MASK_MCI;
@@ -310,6 +311,22 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 
        if (sync_cause) {
                ath9k_debug_sync_cause(common, sync_cause);
+               fatal_int =
+                       (sync_cause &
+                        (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+                       ? true : false;
+
+               if (fatal_int) {
+                       if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+                               ath_dbg(common, ANY,
+                                       "received PCI FATAL interrupt\n");
+                       }
+                       if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+                               ath_dbg(common, ANY,
+                                       "received PCI PERR interrupt\n");
+                       }
+                       *masked |= ATH9K_INT_FATAL;
+               }
 
                if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
                        REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -531,7 +548,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                                rxs->rs_status |= ATH9K_RXERR_PHY;
                                rxs->rs_phyerr = phyerr;
                        }
-               };
+               }
        }
 
        if (rxsp->status11 & AR_KeyMiss)
index 9a34fcaae3ff59621d38f69bfafa58cdcb6a2d03..44c202ce6c66bb12544ddd6673abd6405962f5cc 100644 (file)
@@ -714,6 +714,7 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
 
        return true;
 }
+EXPORT_SYMBOL(ar9003_mci_start_reset);
 
 int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                         struct ath9k_hw_cal_data *caldata)
@@ -812,8 +813,8 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
                      AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
 }
 
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
-                     bool is_full_sleep)
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                    bool is_full_sleep)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
@@ -823,14 +824,13 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
                is_full_sleep, is_2g);
 
        if (!mci->gpm_addr && !mci->sched_addr) {
-               ath_dbg(common, MCI,
-                       "MCI GPM and schedule buffers are not allocated\n");
-               return;
+               ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
+               return -ENOMEM;
        }
 
        if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
-               ath_dbg(common, MCI, "BTCOEX control register is dead\n");
-               return;
+               ath_err(common, "BTCOEX control register is dead\n");
+               return -EINVAL;
        }
 
        /* Program MCI DMA related registers */
@@ -912,6 +912,8 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
 
        if (en_int)
                ar9003_mci_enable_interrupt(ah);
+
+       return 0;
 }
 
 void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
@@ -1026,6 +1028,7 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
 
                if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
                        ar9003_mci_osla_setup(ah, true);
+               REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
        } else {
                ar9003_mci_send_lna_take(ah, true);
                udelay(5);
@@ -1142,8 +1145,8 @@ void ar9003_mci_init_cal_done(struct ath_hw *ah)
        ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
 }
 
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
-                     u16 len, u32 sched_addr)
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                    u16 len, u32 sched_addr)
 {
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
 
@@ -1152,7 +1155,7 @@ void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
        mci->gpm_len = len;
        mci->sched_addr = sched_addr;
 
-       ar9003_mci_reset(ah, true, true, true);
+       return ar9003_mci_reset(ah, true, true, true);
 }
 EXPORT_SYMBOL(ar9003_mci_setup);
 
@@ -1201,12 +1204,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
 
                ar9003_mci_2g5g_switch(ah, false);
                break;
-       case MCI_STATE_SET_BT_CAL_START:
-               mci->bt_state = MCI_BT_CAL_START;
-               break;
-       case MCI_STATE_SET_BT_CAL:
-               mci->bt_state = MCI_BT_CAL;
-               break;
        case MCI_STATE_RESET_REQ_WAKE:
                ar9003_mci_reset_req_wakeup(ah);
                mci->update_2g5g = true;
@@ -1240,6 +1237,10 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
        case MCI_STATE_NEED_FTP_STOMP:
                value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
                break;
+       case MCI_STATE_NEED_FLUSH_BT_INFO:
+               value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
+               mci->need_flush_btinfo = false;
+               break;
        default:
                break;
        }
@@ -1289,7 +1290,7 @@ void ar9003_mci_set_power_awake(struct ath_hw *ah)
        }
        REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
        lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
-       bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
+       bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);
 
        REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
        REG_WRITE(ah, AR_DIAG_SW, diag_sw);
@@ -1327,6 +1328,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
 
        if (first) {
                gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+               if (gpm_ptr >= mci->gpm_len)
+                       gpm_ptr = 0;
+
                mci->gpm_idx = gpm_ptr;
                return gpm_ptr;
        }
@@ -1371,6 +1376,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
                        more_gpm = MCI_GPM_NOMORE;
 
                temp_index = mci->gpm_idx;
+
+               if (temp_index >= mci->gpm_len)
+                       temp_index = 0;
+
                mci->gpm_idx++;
 
                if (mci->gpm_idx >= mci->gpm_len)
index d33b8e1288554dd502dc290d68c8c45684d2e1a8..2a2d01889613a610a8cb6084e6f0705234867123 100644 (file)
@@ -190,8 +190,6 @@ enum mci_bt_state {
 enum mci_state_type {
        MCI_STATE_ENABLE,
        MCI_STATE_SET_BT_AWAKE,
-       MCI_STATE_SET_BT_CAL_START,
-       MCI_STATE_SET_BT_CAL,
        MCI_STATE_LAST_SCHD_MSG_OFFSET,
        MCI_STATE_REMOTE_SLEEP,
        MCI_STATE_RESET_REQ_WAKE,
@@ -202,6 +200,7 @@ enum mci_state_type {
        MCI_STATE_RECOVER_RX,
        MCI_STATE_NEED_FTP_STOMP,
        MCI_STATE_DEBUG,
+       MCI_STATE_NEED_FLUSH_BT_INFO,
        MCI_STATE_MAX
 };
 
@@ -213,7 +212,8 @@ enum mci_gpm_coex_opcode {
        MCI_GPM_COEX_WLAN_CHANNELS,
        MCI_GPM_COEX_BT_PROFILE_INFO,
        MCI_GPM_COEX_BT_STATUS_UPDATE,
-       MCI_GPM_COEX_BT_UPDATE_FLAGS
+       MCI_GPM_COEX_BT_UPDATE_FLAGS,
+       MCI_GPM_COEX_NOOP,
 };
 
 #define MCI_GPM_NOMORE  0
@@ -249,8 +249,8 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
                             u32 *payload, u8 len, bool wait_done,
                             bool check_bt);
 u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
-                     u16 len, u32 sched_addr);
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                    u16 len, u32 sched_addr);
 void ar9003_mci_cleanup(struct ath_hw *ah);
 void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
                              u32 *rx_msg_intr);
@@ -272,8 +272,8 @@ void ar9003_mci_check_bt(struct ath_hw *ah);
 bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
 int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                         struct ath9k_hw_cal_data *caldata);
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
-                     bool is_full_sleep);
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                    bool is_full_sleep);
 void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
 void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
 void ar9003_mci_set_power_awake(struct ath_hw *ah);
index e476f9f92ce3bed0992873283e99a4f75e78a5be..759f5f5a715469bb43c054b45d4d8ed5f6917302 100644 (file)
@@ -88,7 +88,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
                        channelSel = (freq * 4) / div;
                        chan_frac = (((freq * 4) % div) * 0x20000) / div;
                        channelSel = (channelSel << 17) | chan_frac;
-               } else if (AR_SREV_9485(ah)) {
+               } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                        u32 chan_frac;
 
                        /*
@@ -206,6 +206,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
        for (i = 0; i < max_spur_cnts; i++) {
                if (AR_SREV_9462(ah) && (i == 0 || i == 3))
                        continue;
+
                negative = 0;
                if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
                    AR_SREV_9550(ah))
@@ -301,7 +302,9 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
                                int freq_offset,
                                int spur_freq_sd,
                                int spur_delta_phase,
-                               int spur_subchannel_sd)
+                               int spur_subchannel_sd,
+                               int range,
+                               int synth_freq)
 {
        int mask_index = 0;
 
@@ -316,8 +319,11 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
                      AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
        REG_RMW_FIELD(ah, AR_PHY_TIMING11,
                      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
-       REG_RMW_FIELD(ah, AR_PHY_TIMING11,
-                     AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
+       if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
+               REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+                             AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
        REG_RMW_FIELD(ah, AR_PHY_TIMING4,
                      AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
        REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
@@ -358,9 +364,44 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
                      AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
 }
 
+static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
+                                    int freq_offset)
+{
+       int mask_index = 0;
+
+       mask_index = (freq_offset << 4) / 5;
+       if (mask_index < 0)
+               mask_index = mask_index - 1;
+
+       mask_index = mask_index & 0x7f;
+
+       REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+                     AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B,
+                     mask_index);
+
+       /* A == B */
+       REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+                     AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
+                     mask_index);
+
+       REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+                     AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B,
+                     mask_index);
+       REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+                     AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B, 0xe);
+       REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+                     AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
+
+       /* A == B */
+       REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+                     AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+}
+
 static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
                                     struct ath9k_channel *chan,
-                                    int freq_offset)
+                                    int freq_offset,
+                                    int range,
+                                    int synth_freq)
 {
        int spur_freq_sd = 0;
        int spur_subchannel_sd = 0;
@@ -402,7 +443,8 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
                            freq_offset,
                            spur_freq_sd,
                            spur_delta_phase,
-                           spur_subchannel_sd);
+                           spur_subchannel_sd,
+                           range, synth_freq);
 }
 
 /* Spur mitigation for OFDM */
@@ -447,7 +489,17 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
                freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
                freq_offset -= synth_freq;
                if (abs(freq_offset) < range) {
-                       ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+                       ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
+                                                range, synth_freq);
+
+                       if (AR_SREV_9565(ah) && (i < 4)) {
+                               freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
+                                                                mode);
+                               freq_offset -= synth_freq;
+                               if (abs(freq_offset) < range)
+                                       ar9003_hw_spur_ofdm_9565(ah, freq_offset);
+                       }
+
                        break;
                }
        }
@@ -456,7 +508,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
 static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
                                    struct ath9k_channel *chan)
 {
-       ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+       if (!AR_SREV_9565(ah))
+               ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
        ar9003_hw_spur_mitigate_ofdm(ah, chan);
 }
 
@@ -552,9 +605,6 @@ static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
 
        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
                REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
-       else if (AR_SREV_9462(ah))
-               /* xxx only when MCI support is enabled */
-               REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
        else
                REG_WRITE(ah, AR_SELFGEN_MASK, tx);
 
@@ -736,7 +786,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        if (chan->channel == 2484)
                ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
 
-       if (AR_SREV_9462(ah))
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
                          AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
 
@@ -746,9 +796,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
        ath9k_hw_apply_txpower(ah, chan, false);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
-                               AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+                                  AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
                        ah->enabled_cals |= TX_IQ_CAL;
                else
                        ah->enabled_cals &= ~TX_IQ_CAL;
@@ -1111,7 +1161,7 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
        if (AR_SREV_9330(ah))
                ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
                ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
                ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
@@ -1223,17 +1273,17 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
 }
 
 static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
-                                  struct ath_hw_antcomb_conf *antconf)
+                                          struct ath_hw_antcomb_conf *antconf)
 {
        u32 regval;
 
        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
-       antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >>
-                                 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S;
-       antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >>
-                                AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
-       antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
-                                 AR_PHY_9485_ANT_FAST_DIV_BIAS_S;
+       antconf->main_lna_conf = (regval & AR_PHY_ANT_DIV_MAIN_LNACONF) >>
+                                 AR_PHY_ANT_DIV_MAIN_LNACONF_S;
+       antconf->alt_lna_conf = (regval & AR_PHY_ANT_DIV_ALT_LNACONF) >>
+                                AR_PHY_ANT_DIV_ALT_LNACONF_S;
+       antconf->fast_div_bias = (regval & AR_PHY_ANT_FAST_DIV_BIAS) >>
+                                 AR_PHY_ANT_FAST_DIV_BIAS_S;
 
        if (AR_SREV_9330_11(ah)) {
                antconf->lna1_lna2_delta = -9;
@@ -1241,6 +1291,9 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
        } else if (AR_SREV_9485(ah)) {
                antconf->lna1_lna2_delta = -9;
                antconf->div_group = 2;
+       } else if (AR_SREV_9565(ah)) {
+               antconf->lna1_lna2_delta = -3;
+               antconf->div_group = 3;
        } else {
                antconf->lna1_lna2_delta = -3;
                antconf->div_group = 0;
@@ -1253,26 +1306,84 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        u32 regval;
 
        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
-       regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
-                   AR_PHY_9485_ANT_DIV_ALT_LNACONF |
-                   AR_PHY_9485_ANT_FAST_DIV_BIAS |
-                   AR_PHY_9485_ANT_DIV_MAIN_GAINTB |
-                   AR_PHY_9485_ANT_DIV_ALT_GAINTB);
-       regval |= ((antconf->main_lna_conf <<
-                                       AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S)
-                  & AR_PHY_9485_ANT_DIV_MAIN_LNACONF);
-       regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S)
-                  & AR_PHY_9485_ANT_DIV_ALT_LNACONF);
-       regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S)
-                  & AR_PHY_9485_ANT_FAST_DIV_BIAS);
-       regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S)
-                  & AR_PHY_9485_ANT_DIV_MAIN_GAINTB);
-       regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S)
-                  & AR_PHY_9485_ANT_DIV_ALT_GAINTB);
+       regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                   AR_PHY_ANT_DIV_ALT_LNACONF |
+                   AR_PHY_ANT_FAST_DIV_BIAS |
+                   AR_PHY_ANT_DIV_MAIN_GAINTB |
+                   AR_PHY_ANT_DIV_ALT_GAINTB);
+       regval |= ((antconf->main_lna_conf << AR_PHY_ANT_DIV_MAIN_LNACONF_S)
+                  & AR_PHY_ANT_DIV_MAIN_LNACONF);
+       regval |= ((antconf->alt_lna_conf << AR_PHY_ANT_DIV_ALT_LNACONF_S)
+                  & AR_PHY_ANT_DIV_ALT_LNACONF);
+       regval |= ((antconf->fast_div_bias << AR_PHY_ANT_FAST_DIV_BIAS_S)
+                  & AR_PHY_ANT_FAST_DIV_BIAS);
+       regval |= ((antconf->main_gaintb << AR_PHY_ANT_DIV_MAIN_GAINTB_S)
+                  & AR_PHY_ANT_DIV_MAIN_GAINTB);
+       regval |= ((antconf->alt_gaintb << AR_PHY_ANT_DIV_ALT_GAINTB_S)
+                  & AR_PHY_ANT_DIV_ALT_GAINTB);
 
        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 }
 
+static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
+                                                 bool enable)
+{
+       u8 ant_div_ctl1;
+       u32 regval;
+
+       if (!AR_SREV_9565(ah))
+               return;
+
+       ah->shared_chain_lnadiv = enable;
+       ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+
+       regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+       regval &= (~AR_ANT_DIV_CTRL_ALL);
+       regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
+       regval &= ~AR_PHY_ANT_DIV_LNADIV;
+       regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+       if (enable)
+               regval |= AR_ANT_DIV_ENABLE;
+
+       REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+       regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+       regval &= ~AR_FAST_DIV_ENABLE;
+       regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+       if (enable)
+               regval |= AR_FAST_DIV_ENABLE;
+
+       REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+       if (enable) {
+               REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
+               if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
+                       REG_SET_BIT(ah, AR_PHY_RESTART,
+                                   AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+               REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
+       } else {
+               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
+               REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+               REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+               regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+               regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                       AR_PHY_ANT_DIV_ALT_LNACONF |
+                       AR_PHY_ANT_DIV_MAIN_GAINTB |
+                       AR_PHY_ANT_DIV_ALT_GAINTB);
+               regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+               regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
+               REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+       }
+}
+
 static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
                                      struct ath9k_channel *chan,
                                      u8 *ini_reloaded)
@@ -1312,10 +1423,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
        ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
        ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
+
        if (AR_SREV_9462_20(ah))
-               ar9003_hw_prog_ini(ah,
-                               &ah->ini_radio_post_sys2ant,
-                               modesIndex);
+               ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
+                                  modesIndex);
 
        REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
 
@@ -1326,6 +1437,9 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        if (IS_CHAN_A_FAST_CLOCK(ah, chan))
                REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
 
+       if (AR_SREV_9565(ah))
+               REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
+
        REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
 
        ah->modes_index = modesIndex;
@@ -1368,6 +1482,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 
        ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
        ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
+       ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
 
        ar9003_hw_set_nf_limits(ah);
        ar9003_hw_set_radar_conf(ah);
index 84d3d49568616c5452692b1660253f6ae70468cf..9a48e3d2f231eadefcf15dc2604d891150b389fb 100644 (file)
 #define AR_PHY_ML_CNTL_2       (AR_MRC_BASE + 0x1c)
 #define AR_PHY_TST_ADC         (AR_MRC_BASE + 0x20)
 
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A              0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A      0x00000FE0
 #define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S    5
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A                  0x1F
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S                0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A          0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S        0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B      0x00FE0000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_S    17
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B          0x0001F000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B_S        12
 
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A        0x00000FE0
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S      5
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A            0x1F
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S         0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B       0x00FE0000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_S     17
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B           0x0001F000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B_S         12
+
 
 /*
 * MRC Field Definitions
 #define AR_ANT_DIV_ENABLE_S    24
 
 
-#define AR_PHY_9485_ANT_FAST_DIV_BIAS                  0x00007e00
-#define AR_PHY_9485_ANT_FAST_DIV_BIAS_S                  9
-#define AR_PHY_9485_ANT_DIV_LNADIV                     0x01000000
-#define AR_PHY_9485_ANT_DIV_LNADIV_S                   24
-#define AR_PHY_9485_ANT_DIV_ALT_LNACONF                        0x06000000
-#define AR_PHY_9485_ANT_DIV_ALT_LNACONF_S              25
-#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF               0x18000000
-#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S             27
-#define AR_PHY_9485_ANT_DIV_ALT_GAINTB                 0x20000000
-#define AR_PHY_9485_ANT_DIV_ALT_GAINTB_S               29
-#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB                        0x40000000
-#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S              30
-
-#define AR_PHY_9485_ANT_DIV_LNA1_MINUS_LNA2            0x0
-#define AR_PHY_9485_ANT_DIV_LNA2                       0x1
-#define AR_PHY_9485_ANT_DIV_LNA1                       0x2
-#define AR_PHY_9485_ANT_DIV_LNA1_PLUS_LNA2             0x3
+#define AR_PHY_ANT_FAST_DIV_BIAS                0x00007e00
+#define AR_PHY_ANT_FAST_DIV_BIAS_S              9
+#define AR_PHY_ANT_SW_RX_PROT                   0x00800000
+#define AR_PHY_ANT_SW_RX_PROT_S                 23
+#define AR_PHY_ANT_DIV_LNADIV                   0x01000000
+#define AR_PHY_ANT_DIV_LNADIV_S                 24
+#define AR_PHY_ANT_DIV_ALT_LNACONF              0x06000000
+#define AR_PHY_ANT_DIV_ALT_LNACONF_S            25
+#define AR_PHY_ANT_DIV_MAIN_LNACONF             0x18000000
+#define AR_PHY_ANT_DIV_MAIN_LNACONF_S           27
+#define AR_PHY_ANT_DIV_ALT_GAINTB               0x20000000
+#define AR_PHY_ANT_DIV_ALT_GAINTB_S             29
+#define AR_PHY_ANT_DIV_MAIN_GAINTB              0x40000000
+#define AR_PHY_ANT_DIV_MAIN_GAINTB_S            30
+
+#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2          0x0
+#define AR_PHY_ANT_DIV_LNA2                     0x1
+#define AR_PHY_ANT_DIV_LNA1                     0x2
+#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2           0x3
 
 #define AR_PHY_EXTCHN_PWRTHR1   (AR_AGC_BASE + 0x2c)
 #define AR_PHY_EXT_CHN_WIN      (AR_AGC_BASE + 0x30)
 #define AR_PHY_FIND_SIG_RELSTEP        0x1f
 #define AR_PHY_FIND_SIG_RELSTEP_S         0
 #define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT  5
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG 0x00200000
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG_S 21
 #define AR_PHY_RESTART_DIV_GC   0x001C0000
 #define AR_PHY_RESTART_DIV_GC_S 18
 #define AR_PHY_RESTART_ENA      0x01
 #define AR_PHY_BB_THERM_ADC_1_INIT_THERM               0x000000ff
 #define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S             0
 
+#define AR_PHY_BB_THERM_ADC_3                          (AR_SM_BASE + 0x250)
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN     0x0001ff00
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN_S   8
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET         0x000000ff
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_S       0
+
 #define AR_PHY_BB_THERM_ADC_4                          (AR_SM_BASE + 0x254)
 #define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE       0x000000ff
 #define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S     0
 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S       1
 
 #define AR_PHY_65NM_CH0_SYNTH4      0x1608c
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT   (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002)
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT   ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
 #define AR_PHY_65NM_CH0_SYNTH7      0x16098
 #define AR_PHY_65NM_CH0_BIAS1       0x160c0
 #define AR_PHY_65NM_CH0_BIAS2       0x160c4
 #define AR_PHY_65NM_CH2_RXTX4       0x1690c
 
 #define AR_CH0_TOP     (AR_SREV_9300(ah) ? 0x16288 : \
-                               ((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
+                        (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
 #define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
 #define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
 
 #define AR_SWITCH_TABLE_ALL_S (0)
 
 #define AR_PHY_65NM_CH0_THERM       (AR_SREV_9300(ah) ? 0x16290 :\
-                                       (AR_SREV_9462(ah) ? 0x16294 : 0x1628c))
+                                    ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
 
 #define AR_PHY_65NM_CH0_THERM_LOCAL   0x80000000
 #define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
 #define AR_CH0_TOP2_XPABIASLVL_S       12
 
 #define AR_CH0_XTAL            (AR_SREV_9300(ah) ? 0x16294 : \
-                                       (AR_SREV_9462(ah) ? 0x16298 : 0x16290))
+                                ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290))
 #define AR_CH0_XTAL_CAPINDAC   0x7f000000
 #define AR_CH0_XTAL_CAPINDAC_S 24
 #define AR_CH0_XTAL_CAPOUTDAC  0x00fe0000
 #define AR_CH0_XTAL_CAPOUTDAC_S        17
 
-#define AR_PHY_PMU1            (AR_SREV_9462(ah) ? 0x16340 : 0x16c40)
+#define AR_PHY_PMU1            ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40)
 #define AR_PHY_PMU1_PWD                0x1
 #define AR_PHY_PMU1_PWD_S      0
 
-#define AR_PHY_PMU2            (AR_SREV_9462(ah) ? 0x16344 : 0x16c44)
+#define AR_PHY_PMU2            ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44)
 #define AR_PHY_PMU2_PGM                0x00200000
 #define AR_PHY_PMU2_PGM_S      21
 
 
 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON          0x10000000
 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S        28
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR      0x20000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR_S    29
 
 #define AR_PHY_65NM_RXTX4_XLNA_BIAS            0xC0000000
 #define AR_PHY_65NM_RXTX4_XLNA_BIAS_S          30
 #define AR_PHY_CL_TAB_CL_GAIN_MOD              0x1f
 #define AR_PHY_CL_TAB_CL_GAIN_MOD_S            0
 
+#define AR_BTCOEX_WL_LNADIV                                0x1a64
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD               0x00003FFF
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD_S             0
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY           0x00004000
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY_S         14
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON                       0x00008000
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON_S                     15
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION                    0x00030000
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION_S                  16
+#define AR_BTCOEX_WL_LNADIV_MODE                           0x007c0000
+#define AR_BTCOEX_WL_LNADIV_MODE_S                         18
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ    0x00800000
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ_S  23
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE       0x01000000
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE_S     24
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT   0x02000000
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT_S 25
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD          0xFC000000
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S        26
+
 #endif  /* AR9003_PHY_H */
index 4ef7dcccaa2f6bd114dfef8cd3741956ac63f328..58f30f65c6b62fa21acb46f2468f171cc7736125 100644 (file)
@@ -58,7 +58,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
        {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
new file mode 100644 (file)
index 0000000..843e79f
--- /dev/null
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P0_H
+#define INITVALS_9565_1P0_H
+
+/* AR9565 1.0 */
+
+static const u32 ar9565_1p0_mac_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00000008, 0x00000000},
+       {0x00000030, 0x000a0085},
+       {0x00000034, 0x00000005},
+       {0x00000040, 0x00000000},
+       {0x00000044, 0x00000000},
+       {0x00000048, 0x00000008},
+       {0x0000004c, 0x00000010},
+       {0x00000050, 0x00000000},
+       {0x00001040, 0x002ffc0f},
+       {0x00001044, 0x002ffc0f},
+       {0x00001048, 0x002ffc0f},
+       {0x0000104c, 0x002ffc0f},
+       {0x00001050, 0x002ffc0f},
+       {0x00001054, 0x002ffc0f},
+       {0x00001058, 0x002ffc0f},
+       {0x0000105c, 0x002ffc0f},
+       {0x00001060, 0x002ffc0f},
+       {0x00001064, 0x002ffc0f},
+       {0x000010f0, 0x00000100},
+       {0x00001270, 0x00000000},
+       {0x000012b0, 0x00000000},
+       {0x000012f0, 0x00000000},
+       {0x0000143c, 0x00000000},
+       {0x0000147c, 0x00000000},
+       {0x00001810, 0x0f000003},
+       {0x00008000, 0x00000000},
+       {0x00008004, 0x00000000},
+       {0x00008008, 0x00000000},
+       {0x0000800c, 0x00000000},
+       {0x00008018, 0x00000000},
+       {0x00008020, 0x00000000},
+       {0x00008038, 0x00000000},
+       {0x0000803c, 0x00000000},
+       {0x00008040, 0x00000000},
+       {0x00008044, 0x00000000},
+       {0x00008048, 0x00000000},
+       {0x00008054, 0x00000000},
+       {0x00008058, 0x00000000},
+       {0x0000805c, 0x000fc78f},
+       {0x00008060, 0x0000000f},
+       {0x00008064, 0x00000000},
+       {0x00008070, 0x00000310},
+       {0x00008074, 0x00000020},
+       {0x00008078, 0x00000000},
+       {0x0000809c, 0x0000000f},
+       {0x000080a0, 0x00000000},
+       {0x000080a4, 0x02ff0000},
+       {0x000080a8, 0x0e070605},
+       {0x000080ac, 0x0000000d},
+       {0x000080b0, 0x00000000},
+       {0x000080b4, 0x00000000},
+       {0x000080b8, 0x00000000},
+       {0x000080bc, 0x00000000},
+       {0x000080c0, 0x2a800000},
+       {0x000080c4, 0x06900168},
+       {0x000080c8, 0x13881c20},
+       {0x000080cc, 0x01f40000},
+       {0x000080d0, 0x00252500},
+       {0x000080d4, 0x00b00005},
+       {0x000080d8, 0x00400002},
+       {0x000080dc, 0x00000000},
+       {0x000080e0, 0xffffffff},
+       {0x000080e4, 0x0000ffff},
+       {0x000080e8, 0x3f3f3f3f},
+       {0x000080ec, 0x00000000},
+       {0x000080f0, 0x00000000},
+       {0x000080f4, 0x00000000},
+       {0x000080fc, 0x00020000},
+       {0x00008100, 0x00000000},
+       {0x00008108, 0x00000052},
+       {0x0000810c, 0x00000000},
+       {0x00008110, 0x00000000},
+       {0x00008114, 0x000007ff},
+       {0x00008118, 0x000000aa},
+       {0x0000811c, 0x00003210},
+       {0x00008124, 0x00000000},
+       {0x00008128, 0x00000000},
+       {0x0000812c, 0x00000000},
+       {0x00008130, 0x00000000},
+       {0x00008134, 0x00000000},
+       {0x00008138, 0x00000000},
+       {0x0000813c, 0x0000ffff},
+       {0x00008144, 0xffffffff},
+       {0x00008168, 0x00000000},
+       {0x0000816c, 0x00000000},
+       {0x00008170, 0x18486200},
+       {0x00008174, 0x33332210},
+       {0x00008178, 0x00000000},
+       {0x0000817c, 0x00020000},
+       {0x000081c4, 0x33332210},
+       {0x000081c8, 0x00000000},
+       {0x000081cc, 0x00000000},
+       {0x000081d4, 0x00000000},
+       {0x000081ec, 0x00000000},
+       {0x000081f0, 0x00000000},
+       {0x000081f4, 0x00000000},
+       {0x000081f8, 0x00000000},
+       {0x000081fc, 0x00000000},
+       {0x00008240, 0x00100000},
+       {0x00008244, 0x0010f424},
+       {0x00008248, 0x00000800},
+       {0x0000824c, 0x0001e848},
+       {0x00008250, 0x00000000},
+       {0x00008254, 0x00000000},
+       {0x00008258, 0x00000000},
+       {0x0000825c, 0x40000000},
+       {0x00008260, 0x00080922},
+       {0x00008264, 0x9d400010},
+       {0x00008268, 0xffffffff},
+       {0x0000826c, 0x0000ffff},
+       {0x00008270, 0x00000000},
+       {0x00008274, 0x40000000},
+       {0x00008278, 0x003e4180},
+       {0x0000827c, 0x00000004},
+       {0x00008284, 0x0000002c},
+       {0x00008288, 0x0000002c},
+       {0x0000828c, 0x000000ff},
+       {0x00008294, 0x00000000},
+       {0x00008298, 0x00000000},
+       {0x0000829c, 0x00000000},
+       {0x00008300, 0x00000140},
+       {0x00008314, 0x00000000},
+       {0x0000831c, 0x0000010d},
+       {0x00008328, 0x00000000},
+       {0x0000832c, 0x0000001f},
+       {0x00008330, 0x00000302},
+       {0x00008334, 0x00000700},
+       {0x00008338, 0xffff0000},
+       {0x0000833c, 0x02400000},
+       {0x00008340, 0x000107ff},
+       {0x00008344, 0xaa48105b},
+       {0x00008348, 0x008f0000},
+       {0x0000835c, 0x00000000},
+       {0x00008360, 0xffffffff},
+       {0x00008364, 0xffffffff},
+       {0x00008368, 0x00000000},
+       {0x00008370, 0x00000000},
+       {0x00008374, 0x000000ff},
+       {0x00008378, 0x00000000},
+       {0x0000837c, 0x00000000},
+       {0x00008380, 0xffffffff},
+       {0x00008384, 0xffffffff},
+       {0x00008390, 0xffffffff},
+       {0x00008394, 0xffffffff},
+       {0x00008398, 0x00000000},
+       {0x0000839c, 0x00000000},
+       {0x000083a4, 0x0000fa14},
+       {0x000083a8, 0x000f0c00},
+       {0x000083ac, 0x33332210},
+       {0x000083b0, 0x33332210},
+       {0x000083b4, 0x33332210},
+       {0x000083b8, 0x33332210},
+       {0x000083bc, 0x00000000},
+       {0x000083c0, 0x00000000},
+       {0x000083c4, 0x00000000},
+       {0x000083c8, 0x00000000},
+       {0x000083cc, 0x00000200},
+       {0x000083d0, 0x800301ff},
+};
+
+static const u32 ar9565_1p0_mac_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+       {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+       {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+       {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+       {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+       {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+       {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+       {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9565_1p0_baseband_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00009800, 0xafe68e30},
+       {0x00009804, 0xfd14e000},
+       {0x00009808, 0x9c0a8f6b},
+       {0x0000980c, 0x04800000},
+       {0x00009814, 0x9280c00a},
+       {0x00009818, 0x00000000},
+       {0x0000981c, 0x00020028},
+       {0x00009834, 0x6400a290},
+       {0x00009838, 0x0108ecff},
+       {0x0000983c, 0x0d000600},
+       {0x00009880, 0x201fff00},
+       {0x00009884, 0x00001042},
+       {0x000098a4, 0x00200400},
+       {0x000098b0, 0x32840bbe},
+       {0x000098d0, 0x004b6a8e},
+       {0x000098d4, 0x00000820},
+       {0x000098dc, 0x00000000},
+       {0x000098e4, 0x01ffffff},
+       {0x000098e8, 0x01ffffff},
+       {0x000098ec, 0x01ffffff},
+       {0x000098f0, 0x00000000},
+       {0x000098f4, 0x00000000},
+       {0x00009bf0, 0x80000000},
+       {0x00009c04, 0xff55ff55},
+       {0x00009c08, 0x0320ff55},
+       {0x00009c0c, 0x00000000},
+       {0x00009c10, 0x00000000},
+       {0x00009c14, 0x00046384},
+       {0x00009c18, 0x05b6b440},
+       {0x00009c1c, 0x00b6b440},
+       {0x00009d00, 0xc080a333},
+       {0x00009d04, 0x40206c10},
+       {0x00009d08, 0x009c4060},
+       {0x00009d0c, 0x1883800a},
+       {0x00009d10, 0x01834061},
+       {0x00009d14, 0x00c00400},
+       {0x00009d18, 0x00000000},
+       {0x00009e08, 0x0078230c},
+       {0x00009e24, 0x990bb515},
+       {0x00009e28, 0x126f0000},
+       {0x00009e30, 0x06336f77},
+       {0x00009e34, 0x6af6532f},
+       {0x00009e38, 0x0cc80c00},
+       {0x00009e40, 0x0d261820},
+       {0x00009e4c, 0x00001004},
+       {0x00009e50, 0x00ff03f1},
+       {0x00009e54, 0xe4c355c7},
+       {0x00009e5c, 0xe9198724},
+       {0x00009fc0, 0x823e4fc8},
+       {0x00009fc4, 0x0001efb5},
+       {0x00009fcc, 0x40000014},
+       {0x0000a20c, 0x00000000},
+       {0x0000a220, 0x00000000},
+       {0x0000a224, 0x00000000},
+       {0x0000a228, 0x10002310},
+       {0x0000a23c, 0x00000000},
+       {0x0000a244, 0x0c000000},
+       {0x0000a2a0, 0x00000001},
+       {0x0000a2c0, 0x00000001},
+       {0x0000a2c8, 0x00000000},
+       {0x0000a2cc, 0x18c43433},
+       {0x0000a2d4, 0x00000000},
+       {0x0000a2ec, 0x00000000},
+       {0x0000a2f0, 0x00000000},
+       {0x0000a2f4, 0x00000000},
+       {0x0000a2f8, 0x00000000},
+       {0x0000a344, 0x00000000},
+       {0x0000a34c, 0x00000000},
+       {0x0000a350, 0x0000a000},
+       {0x0000a364, 0x00000000},
+       {0x0000a370, 0x00000000},
+       {0x0000a390, 0x00000001},
+       {0x0000a394, 0x00000444},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
+       {0x0000a3a4, 0x00000000},
+       {0x0000a3a8, 0xaaaaaaaa},
+       {0x0000a3ac, 0x3c466478},
+       {0x0000a3c0, 0x20202020},
+       {0x0000a3c4, 0x22222220},
+       {0x0000a3c8, 0x20200020},
+       {0x0000a3cc, 0x20202020},
+       {0x0000a3d0, 0x20202020},
+       {0x0000a3d4, 0x20202020},
+       {0x0000a3d8, 0x20202020},
+       {0x0000a3dc, 0x20202020},
+       {0x0000a3e0, 0x20202020},
+       {0x0000a3e4, 0x20202020},
+       {0x0000a3e8, 0x20202020},
+       {0x0000a3ec, 0x20202020},
+       {0x0000a3f0, 0x00000000},
+       {0x0000a3f4, 0x00000006},
+       {0x0000a3f8, 0x0c9bd380},
+       {0x0000a3fc, 0x000f0f01},
+       {0x0000a400, 0x8fa91f01},
+       {0x0000a404, 0x00000000},
+       {0x0000a408, 0x0e79e5c6},
+       {0x0000a40c, 0x00820820},
+       {0x0000a414, 0x1ce739ce},
+       {0x0000a418, 0x2d001dce},
+       {0x0000a41c, 0x1ce739ce},
+       {0x0000a420, 0x000001ce},
+       {0x0000a424, 0x1ce739ce},
+       {0x0000a428, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce},
+       {0x0000a434, 0x00000000},
+       {0x0000a438, 0x00001801},
+       {0x0000a43c, 0x00000000},
+       {0x0000a440, 0x00000000},
+       {0x0000a444, 0x00000000},
+       {0x0000a448, 0x05000096},
+       {0x0000a44c, 0x00000001},
+       {0x0000a450, 0x00010000},
+       {0x0000a454, 0x03000000},
+       {0x0000a458, 0x00000000},
+       {0x0000a644, 0xbfad9d74},
+       {0x0000a648, 0x0048060a},
+       {0x0000a64c, 0x00003c37},
+       {0x0000a670, 0x03020100},
+       {0x0000a674, 0x09080504},
+       {0x0000a678, 0x0d0c0b0a},
+       {0x0000a67c, 0x13121110},
+       {0x0000a680, 0x31301514},
+       {0x0000a684, 0x35343332},
+       {0x0000a688, 0x00000036},
+       {0x0000a690, 0x00000838},
+       {0x0000a6b4, 0x00512c01},
+       {0x0000a7c0, 0x00000000},
+       {0x0000a7c4, 0xfffffffc},
+       {0x0000a7c8, 0x00000000},
+       {0x0000a7cc, 0x00000000},
+       {0x0000a7d0, 0x00000000},
+       {0x0000a7d4, 0x00000004},
+       {0x0000a7dc, 0x00000001},
+       {0x0000a7f0, 0x80000000},
+};
+
+static const u32 ar9565_1p0_baseband_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+       {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+       {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
+       {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+       {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+       {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+       {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+       {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+       {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+       {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+       {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+       {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+       {0x0000a204, 0x07318fc0, 0x07318fc4, 0x07318fc4, 0x07318fc0},
+       {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+       {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+       {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+       {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+       {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+       {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+       {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+       {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+       {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+       {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+       {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+       {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+       {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+       {0x0000a288, 0x00100510, 0x00100510, 0x00100510, 0x00100510},
+       {0x0000a28c, 0x00021551, 0x00021551, 0x00021551, 0x00021551},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+       {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
+       {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+       {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+       {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_radio_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00016000, 0x36db6db6},
+       {0x00016004, 0x6db6db40},
+       {0x00016008, 0x73f00000},
+       {0x0001600c, 0x00000000},
+       {0x00016010, 0x6d823601},
+       {0x00016040, 0x7f80fff8},
+       {0x0001604c, 0x1c99e04f},
+       {0x00016050, 0x6db6db6c},
+       {0x00016058, 0x6c200000},
+       {0x00016080, 0x000c0000},
+       {0x00016084, 0x9a68048c},
+       {0x00016088, 0x54214514},
+       {0x0001608c, 0x1203040b},
+       {0x00016090, 0x24926490},
+       {0x00016098, 0xd28b3330},
+       {0x000160a0, 0x0a108ffe},
+       {0x000160a4, 0x812fc491},
+       {0x000160a8, 0x423c8000},
+       {0x000160b4, 0x92000000},
+       {0x000160b8, 0x0285dddc},
+       {0x000160bc, 0x02908888},
+       {0x000160c0, 0x006db6d0},
+       {0x000160c4, 0x6dd6db60},
+       {0x000160c8, 0x6db6db6c},
+       {0x000160cc, 0x6de6c1b0},
+       {0x00016100, 0x3fffbe04},
+       {0x00016104, 0xfff80000},
+       {0x00016108, 0x00200400},
+       {0x00016110, 0x00000000},
+       {0x00016144, 0x02084080},
+       {0x00016148, 0x000080c0},
+       {0x00016280, 0x050a0001},
+       {0x00016284, 0x3d841440},
+       {0x00016288, 0x00000000},
+       {0x0001628c, 0xe3000000},
+       {0x00016290, 0xa1004080},
+       {0x00016294, 0x40000028},
+       {0x00016298, 0x55aa2900},
+       {0x00016340, 0x131c827a},
+       {0x00016344, 0x00300000},
+};
+
+static const u32 ar9565_1p0_radio_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+       {0x000160ac, 0xa4646c08, 0xa4646c08, 0xa4646c08, 0xa4646c08},
+       {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+       {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+       {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9565_1p0_soc_preamble[][2] = {
+       /* Addr      allmodes  */
+       {0x00004078, 0x00000002},
+       {0x000040a4, 0x00a0c9c9},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9565_1p0_soc_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+};
+
+static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x01910190},
+       {0x0000a030, 0x01930192},
+       {0x0000a034, 0x01950194},
+       {0x0000a038, 0x038a0196},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x22222229},
+       {0x0000a084, 0x1d1d1d1d},
+       {0x0000a088, 0x1d1d1d1d},
+       {0x0000a08c, 0x1d1d1d1d},
+       {0x0000a090, 0x171d1d1d},
+       {0x0000a094, 0x11111717},
+       {0x0000a098, 0x00030311},
+       {0x0000a09c, 0x00000000},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x32323232},
+       {0x0000b084, 0x2f2f3232},
+       {0x0000b088, 0x23282a2d},
+       {0x0000b08c, 0x1c1e2123},
+       {0x0000b090, 0x14171919},
+       {0x0000b094, 0x0e0e1214},
+       {0x0000b098, 0x03050707},
+       {0x0000b09c, 0x00030303},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+       {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+       {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+       {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+       {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+       {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+       {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+       {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+       {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+       {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+       {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+       {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+       {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+       {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+       {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+       {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+       {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+       {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+       {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+       {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+       {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
+       /* Addr      allmodes  */
+       {0x00018c00, 0x18212ede},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0003780c},
+};
+
+static const u32 ar9565_1p0_modes_fast_clock[][3] = {
+       /* Addr      5G_HT20     5G_HT40   */
+       {0x00001030, 0x00000268, 0x000004d0},
+       {0x00001070, 0x0000018c, 0x00000318},
+       {0x000010b0, 0x00000fd0, 0x00001fa0},
+       {0x00008014, 0x044c044c, 0x08980898},
+       {0x0000801c, 0x148ec02b, 0x148ec057},
+       {0x00008318, 0x000044c0, 0x00008980},
+       {0x00009e00, 0x03721821, 0x03721821},
+       {0x0000a230, 0x0000400b, 0x00004016},
+       {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x03820190},
+       {0x0000a030, 0x03840383},
+       {0x0000a034, 0x03880385},
+       {0x0000a038, 0x038a0389},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x29292929},
+       {0x0000a084, 0x29292929},
+       {0x0000a088, 0x29292929},
+       {0x0000a08c, 0x29292929},
+       {0x0000a090, 0x22292929},
+       {0x0000a094, 0x1d1d2222},
+       {0x0000a098, 0x0c111117},
+       {0x0000a09c, 0x00030303},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x00bf00a0},
+       {0x0000a0c4, 0x11a011a1},
+       {0x0000a0c8, 0x11be11bf},
+       {0x0000a0cc, 0x11bc11bd},
+       {0x0000a0d0, 0x22632264},
+       {0x0000a0d4, 0x22612262},
+       {0x0000a0d8, 0x227f2260},
+       {0x0000a0dc, 0x4322227e},
+       {0x0000a0e0, 0x43204321},
+       {0x0000a0e4, 0x433e433f},
+       {0x0000a0e8, 0x4462433d},
+       {0x0000a0ec, 0x44604461},
+       {0x0000a0f0, 0x447e447f},
+       {0x0000a0f4, 0x5582447d},
+       {0x0000a0f8, 0x55805581},
+       {0x0000a0fc, 0x559e559f},
+       {0x0000a100, 0x66816682},
+       {0x0000a104, 0x669f6680},
+       {0x0000a108, 0x669d669e},
+       {0x0000a10c, 0x77627763},
+       {0x0000a110, 0x77607761},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x00bf00a0},
+       {0x0000a144, 0x11a011a1},
+       {0x0000a148, 0x11be11bf},
+       {0x0000a14c, 0x11bc11bd},
+       {0x0000a150, 0x22632264},
+       {0x0000a154, 0x22612262},
+       {0x0000a158, 0x227f2260},
+       {0x0000a15c, 0x4322227e},
+       {0x0000a160, 0x43204321},
+       {0x0000a164, 0x433e433f},
+       {0x0000a168, 0x4462433d},
+       {0x0000a16c, 0x44604461},
+       {0x0000a170, 0x447e447f},
+       {0x0000a174, 0x5582447d},
+       {0x0000a178, 0x55805581},
+       {0x0000a17c, 0x559e559f},
+       {0x0000a180, 0x66816682},
+       {0x0000a184, 0x669f6680},
+       {0x0000a188, 0x669d669e},
+       {0x0000a18c, 0x77627763},
+       {0x0000a190, 0x77607761},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x32323232},
+       {0x0000b084, 0x2f2f3232},
+       {0x0000b088, 0x23282a2d},
+       {0x0000b08c, 0x1c1e2123},
+       {0x0000b090, 0x14171919},
+       {0x0000b094, 0x0e0e1214},
+       {0x0000b098, 0x03050707},
+       {0x0000b09c, 0x00030303},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_modes_low_ob_db_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+       {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+       {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+       {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+       {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+       {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+       {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+       {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+       {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+       {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+       {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+       {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+       {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+       {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+       {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+       {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+       {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+       {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+       {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+       {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+       {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10022223, 0x10022223, 0x0c000200, 0x0c000200},
+       {0x0000a510, 0x15022620, 0x15022620, 0x10000202, 0x10000202},
+       {0x0000a514, 0x19022622, 0x19022622, 0x13000400, 0x13000400},
+       {0x0000a518, 0x1c022822, 0x1c022822, 0x17000402, 0x17000402},
+       {0x0000a51c, 0x21022842, 0x21022842, 0x1b000404, 0x1b000404},
+       {0x0000a520, 0x24022c41, 0x24022c41, 0x1e000603, 0x1e000603},
+       {0x0000a524, 0x29023042, 0x29023042, 0x23000a02, 0x23000a02},
+       {0x0000a528, 0x2d023044, 0x2d023044, 0x27000a04, 0x27000a04},
+       {0x0000a52c, 0x31023644, 0x31023644, 0x2a000a20, 0x2a000a20},
+       {0x0000a530, 0x36025643, 0x36025643, 0x2e000e20, 0x2e000e20},
+       {0x0000a534, 0x3a025a44, 0x3a025a44, 0x32000e22, 0x32000e22},
+       {0x0000a538, 0x3d025e45, 0x3d025e45, 0x36000e24, 0x36000e24},
+       {0x0000a53c, 0x43025e4a, 0x43025e4a, 0x3a001640, 0x3a001640},
+       {0x0000a540, 0x4a025e6c, 0x4a025e6c, 0x3e001660, 0x3e001660},
+       {0x0000a544, 0x50025e8e, 0x50025e8e, 0x41001861, 0x41001861},
+       {0x0000a548, 0x56025eb2, 0x56025eb2, 0x45001a81, 0x45001a81},
+       {0x0000a54c, 0x5c025eb5, 0x5c025eb5, 0x49001a83, 0x49001a83},
+       {0x0000a550, 0x62025ef6, 0x62025ef6, 0x4c001c84, 0x4c001c84},
+       {0x0000a554, 0x65025f56, 0x65025f56, 0x4f001ce3, 0x4f001ce3},
+       {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
+       {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
+       {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
+       {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804201, 0x00804201, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x00804201, 0x00804201, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008201, 0x02008201, 0x02008501, 0x02008501},
+       {0x0000a620, 0x02c10a03, 0x02c10a03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04815205, 0x04815205, 0x02c10b04, 0x02c10b04},
+       {0x0000a628, 0x0581d406, 0x0581d406, 0x03814b04, 0x03814b04},
+       {0x0000a62c, 0x0581d607, 0x0581d607, 0x05018e05, 0x05018e05},
+       {0x0000a630, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x0000a634, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x0000a638, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x0000a63c, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+       {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+       {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+       {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+       {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+       {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+       {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+       {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+       {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+       {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+       {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+       {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+       {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+       {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+       {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+       {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+       {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+       {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+       {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+       {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+       {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+       {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+       {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x056d82e6, 0x056d82e6, 0x056d82e6, 0x056d82e6},
+       {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+#endif /* INITVALS_9565_1P0_H */
index b09285c36c4aaaeaa27ffb1f3be1adadb263dddc..dfe6a4707fd22684a5994de3b6c4d9dcadd456f2 100644 (file)
@@ -173,6 +173,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 
 #define ATH_AN_2_TID(_an, _tidno)  (&(_an)->tid[(_tidno)])
 
+#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+
 #define ATH_TX_COMPLETE_POLL_INT       1000
 
 enum ATH_AGGR_STATUS {
@@ -280,6 +282,7 @@ struct ath_tx_control {
        struct ath_txq *txq;
        struct ath_node *an;
        u8 paprd;
+       struct ieee80211_sta *sta;
 };
 
 #define ATH_TX_ERROR        0x01
@@ -422,7 +425,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_beacon(struct ath_softc *sc);
-void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
 
 /*******************/
 /* Link Monitoring */
@@ -472,7 +474,7 @@ struct ath_btcoex {
        unsigned long op_flags;
        int bt_stomp_type; /* Types of BT stomping */
        u32 btcoex_no_stomp; /* in usec */
-       u32 btcoex_period; /* in usec */
+       u32 btcoex_period; /* in msec */
        u32 btscan_no_stomp; /* in usec */
        u32 duty_cycle;
        u32 bt_wait_time;
@@ -537,6 +539,7 @@ struct ath9k_wow_pattern {
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
 void ath_deinit_leds(struct ath_softc *sc);
+void ath_fill_led_pin(struct ath_softc *sc);
 #else
 static inline void ath_init_leds(struct ath_softc *sc)
 {
@@ -545,6 +548,9 @@ static inline void ath_init_leds(struct ath_softc *sc)
 static inline void ath_deinit_leds(struct ath_softc *sc)
 {
 }
+static inline void ath_fill_led_pin(struct ath_softc *sc)
+{
+}
 #endif
 
 /*******************************/
@@ -596,8 +602,6 @@ struct ath_ant_comb {
        int main_conf;
        enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
        enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
-       int first_bias;
-       int second_bias;
        bool first_ratio;
        bool second_ratio;
        unsigned long scan_start_time;
index acd437384fe47840852aeb3f967524ac52265975..419e9a3f2feda6c20fc7a504120f26b0820bb9ca 100644 (file)
@@ -43,8 +43,8 @@ static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
        { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
 };
 
-static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
-                                   [AR9300_NUM_WLAN_WEIGHTS] = {
+static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+                                [AR9300_NUM_WLAN_WEIGHTS] = {
        { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
        { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
        { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
@@ -208,14 +208,37 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
                            AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
 }
 
+/*
+ * For AR9002, bt_weight/wlan_weight are used.
+ * For AR9003 and above, stomp_type is used.
+ */
 void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
                                u32 bt_weight,
-                               u32 wlan_weight)
+                               u32 wlan_weight,
+                               enum ath_stomp_type stomp_type)
 {
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-       btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
-                                    SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+       if (AR_SREV_9300_20_OR_LATER(ah)) {
+               const u32 *weight = ar9003_wlan_weights[stomp_type];
+               int i;
+
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+                       if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+                           btcoex_hw->mci.stomp_ftp)
+                               stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
+                       weight = mci_wlan_weights[stomp_type];
+               }
+
+               for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+                       btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+                       btcoex_hw->wlan_weight[i] = weight[i];
+               }
+       } else {
+               btcoex_hw->bt_coex_weights =
+                       SM(bt_weight, AR_BTCOEX_BT_WGHT) |
+                       SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+       }
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
@@ -282,7 +305,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
                ath9k_hw_btcoex_enable_2wire(ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
-               if (AR_SREV_9462(ah)) {
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                        ath9k_hw_btcoex_enable_mci(ah);
                        return;
                }
@@ -304,7 +327,7 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
        int i;
 
        btcoex_hw->enabled = false;
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
                for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
                        REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
@@ -332,26 +355,6 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
 
-static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
-                        enum ath_stomp_type stomp_type)
-{
-       struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
-       const u32 *weight = ar9003_wlan_weights[stomp_type];
-       int i;
-
-       if (AR_SREV_9462(ah)) {
-               if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
-                   btcoex->mci.stomp_ftp)
-                       stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
-               weight = ar9462_wlan_weights[stomp_type];
-       }
-
-       for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
-               btcoex->bt_weight[i] = AR9300_BT_WGHT;
-               btcoex->wlan_weight[i] = weight[i];
-       }
-}
-
 /*
  * Configures appropriate weight based on stomp type.
  */
@@ -359,22 +362,22 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
                              enum ath_stomp_type stomp_type)
 {
        if (AR_SREV_9300_20_OR_LATER(ah)) {
-               ar9003_btcoex_bt_stomp(ah, stomp_type);
+               ath9k_hw_btcoex_set_weight(ah, 0, 0, stomp_type);
                return;
        }
 
        switch (stomp_type) {
        case ATH_BTCOEX_STOMP_ALL:
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                               AR_STOMP_ALL_WLAN_WGHT);
+                                          AR_STOMP_ALL_WLAN_WGHT, 0);
                break;
        case ATH_BTCOEX_STOMP_LOW:
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                               AR_STOMP_LOW_WLAN_WGHT);
+                                          AR_STOMP_LOW_WLAN_WGHT, 0);
                break;
        case ATH_BTCOEX_STOMP_NONE:
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                               AR_STOMP_NONE_WLAN_WGHT);
+                                          AR_STOMP_NONE_WLAN_WGHT, 0);
                break;
        default:
                ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
index 20092f98658f84b3f0ad2429543f168de2d2866b..385197ad79b006f494c3659dc88efb343038c184 100644 (file)
@@ -107,7 +107,8 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
 void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
                                u32 bt_weight,
-                               u32 wlan_weight);
+                               u32 wlan_weight,
+                               enum ath_stomp_type stomp_type);
 void ath9k_hw_btcoex_disable(struct ath_hw *ah);
 void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
                              enum ath_stomp_type stomp_type);
index c8ef30127adb53da590bf4d6350798fa261dffa6..6727b566d294a43073c2a4cf3cc532f0e633b34e 100644 (file)
@@ -222,6 +222,57 @@ static const struct file_operations fops_disable_ani = {
        .llseek = default_llseek,
 };
 
+static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", common->antenna_diversity);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ant_diversity(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       unsigned long antenna_diversity;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       if (!AR_SREV_9565(sc->sc_ah))
+               goto exit;
+
+       buf[len] = '\0';
+       if (strict_strtoul(buf, 0, &antenna_diversity))
+               return -EINVAL;
+
+       common->antenna_diversity = !!antenna_diversity;
+       ath9k_ps_wakeup(sc);
+       ath_ant_comb_update(sc);
+       ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
+               common->antenna_diversity);
+       ath9k_ps_restore(sc);
+exit:
+       return count;
+}
+
+static const struct file_operations fops_ant_diversity = {
+       .read = read_file_ant_diversity,
+       .write = write_file_ant_diversity,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static ssize_t read_file_dma(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
 {
@@ -373,6 +424,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
                sc->debug.stats.istats.tsfoor++;
        if (status & ATH9K_INT_MCI)
                sc->debug.stats.istats.mci++;
+       if (status & ATH9K_INT_GENTIMER)
+               sc->debug.stats.istats.gen_timer++;
 }
 
 static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +471,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
        PR_IS("DTIM", dtim);
        PR_IS("TSFOOR", tsfoor);
        PR_IS("MCI", mci);
+       PR_IS("GENTIMER", gen_timer);
        PR_IS("TOTAL", total);
 
        len += snprintf(buf + len, mxlen - len,
@@ -1598,12 +1652,12 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_samps);
 #endif
-
        debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
-
        debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
+       debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_ant_diversity);
 
        return 0;
 }
index 8b9d080d89da7ae3b276589bd8398fc39d211bb8..2ed9785a38fa0467a8ab8be32d30c912a5194a87 100644 (file)
@@ -41,7 +41,6 @@ enum ath_reset_type {
        RESET_TYPE_PLL_HANG,
        RESET_TYPE_MAC_HANG,
        RESET_TYPE_BEACON_STUCK,
-       RESET_TYPE_MCI,
        __RESET_TYPE_MAX
 };
 
@@ -74,6 +73,8 @@ enum ath_reset_type {
  * from a beacon differs from the PCU's internal TSF by more than a
  * (programmable) threshold
  * @local_timeout: Internal bus timeout.
+ * @mci: MCI interrupt, specific to MCI based BTCOEX chipsets
+ * @gen_timer: Generic hardware timer interrupt
  */
 struct ath_interrupt_stats {
        u32 total;
@@ -100,6 +101,7 @@ struct ath_interrupt_stats {
        u32 bb_watchdog;
        u32 tsfoor;
        u32 mci;
+       u32 gen_timer;
 
        /* Sync-cause stats */
        u32 sync_cause_all;
index 484b313059061ac3b6fdc74f18787ef059e58933..319c651fa6c5298d66d6d4a44969b569aafcf5aa 100644 (file)
@@ -96,6 +96,7 @@
 
 #define ATH9K_POW_SM(_r, _s)   (((_r) & 0x3f) << (_s))
 #define FREQ2FBIN(x, y)                ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y)                ((y) ? (2300 + x) : (4800 + 5 * x))
 #define ath9k_hw_use_flash(_ah)        (!(_ah->ah_flags & AH_USE_EEPROM))
 
 #define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
 #define EEP_RFSILENT_ENABLED_S      0
 #define EEP_RFSILENT_POLARITY       0x0002
 #define EEP_RFSILENT_POLARITY_S     1
-#define EEP_RFSILENT_GPIO_SEL       (AR_SREV_9462(ah) ? 0x00fc : 0x001c)
+#define EEP_RFSILENT_GPIO_SEL       ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00fc : 0x001c)
 #define EEP_RFSILENT_GPIO_SEL_S     2
 
 #define AR5416_OPFLAGS_11A           0x01
index 9f83f71742a5ecb774f95c3d563f2e0dc7d37ab7..d9ed141a053e6a885fcdf031866f9daa1e5918d2 100644 (file)
@@ -44,25 +44,6 @@ void ath_init_leds(struct ath_softc *sc)
        if (AR_SREV_9100(sc->sc_ah))
                return;
 
-       if (sc->sc_ah->led_pin < 0) {
-               if (AR_SREV_9287(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9287;
-               else if (AR_SREV_9485(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9485;
-               else if (AR_SREV_9300(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9300;
-               else if (AR_SREV_9462(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9462;
-               else
-                       sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-       }
-
-       /* Configure gpio 1 for output */
-       ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-       /* LED off, active low */
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
        if (!led_blink)
                sc->led_cdev.default_trigger =
                        ieee80211_get_radio_led_name(sc->hw);
@@ -78,6 +59,31 @@ void ath_init_leds(struct ath_softc *sc)
 
        sc->led_registered = true;
 }
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+
+       if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
+               return;
+
+       if (AR_SREV_9287(ah))
+               ah->led_pin = ATH_LED_PIN_9287;
+       else if (AR_SREV_9485(sc->sc_ah))
+               ah->led_pin = ATH_LED_PIN_9485;
+       else if (AR_SREV_9300(sc->sc_ah))
+               ah->led_pin = ATH_LED_PIN_9300;
+       else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
+               ah->led_pin = ATH_LED_PIN_9462;
+       else
+               ah->led_pin = ATH_LED_PIN_DEF;
+
+       /* Configure gpio 1 for output */
+       ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+       /* LED off, active low */
+       ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+}
 #endif
 
 /*******************/
@@ -228,7 +234,12 @@ static void ath_btcoex_period_timer(unsigned long data)
        ath9k_hw_btcoex_enable(ah);
        spin_unlock_bh(&btcoex->btcoex_lock);
 
-       if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+       /*
+        * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
+        * ensure that we properly convert btcoex_period to usec
+        * for any comparision with (btcoex/btscan_)no_stomp.
+        */
+       if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
                if (btcoex->hw_timer_enabled)
                        ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
 
@@ -309,8 +320,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
        ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
 
        /* make sure duty cycle timer is also stopped when resuming */
-       if (btcoex->hw_timer_enabled)
+       if (btcoex->hw_timer_enabled) {
                ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+               btcoex->hw_timer_enabled = false;
+       }
 
        btcoex->bt_priority_cnt = 0;
        btcoex->bt_priority_time = jiffies;
@@ -331,18 +344,20 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
 
        del_timer_sync(&btcoex->period_timer);
 
-       if (btcoex->hw_timer_enabled)
+       if (btcoex->hw_timer_enabled) {
                ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
-       btcoex->hw_timer_enabled = false;
+               btcoex->hw_timer_enabled = false;
+       }
 }
 
 void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
 
-       if (btcoex->hw_timer_enabled)
+       if (btcoex->hw_timer_enabled) {
                ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+               btcoex->hw_timer_enabled = false;
+       }
 }
 
 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -380,7 +395,10 @@ void ath9k_start_btcoex(struct ath_softc *sc)
            !ah->btcoex_hw.enabled) {
                if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
                        ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                                  AR_STOMP_LOW_WLAN_WGHT);
+                                                  AR_STOMP_LOW_WLAN_WGHT, 0);
+               else
+                       ath9k_hw_btcoex_set_weight(ah, 0, 0,
+                                                  ATH_BTCOEX_STOMP_NONE);
                ath9k_hw_btcoex_enable(ah);
 
                if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
@@ -397,7 +415,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
                if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
                        ath9k_btcoex_timer_pause(sc);
                ath9k_hw_btcoex_disable(ah);
-               if (AR_SREV_9462(ah))
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                        ath_mci_flush_profile(&sc->btcoex.mci);
        }
 }
index aa327adcc3d8ffdf09261884580914ab41556ade..924c4616c3d990dc7f03e46ed299bdc94396d765 100644 (file)
@@ -973,8 +973,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
 static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
 {
        int transfer, err;
-       const void *data = hif_dev->firmware->data;
-       size_t len = hif_dev->firmware->size;
+       const void *data = hif_dev->fw_data;
+       size_t len = hif_dev->fw_size;
        u32 addr = AR9271_FIRMWARE;
        u8 *buf = kzalloc(4096, GFP_KERNEL);
        u32 firm_offset;
@@ -1017,7 +1017,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
                return -EIO;
 
        dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
-                hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
+                hif_dev->fw_name, (unsigned long) hif_dev->fw_size);
 
        return 0;
 }
@@ -1072,14 +1072,15 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
  */
 static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
 {
-       struct device *parent = hif_dev->udev->dev.parent;
+       struct device *dev = &hif_dev->udev->dev;
+       struct device *parent = dev->parent;
 
        complete(&hif_dev->fw_done);
 
        if (parent)
                device_lock(parent);
 
-       device_release_driver(&hif_dev->udev->dev);
+       device_release_driver(dev);
 
        if (parent)
                device_unlock(parent);
@@ -1099,11 +1100,11 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
 
        hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
                                                 &hif_dev->udev->dev);
-       if (hif_dev->htc_handle == NULL) {
-               goto err_fw;
-       }
+       if (hif_dev->htc_handle == NULL)
+               goto err_dev_alloc;
 
-       hif_dev->firmware = fw;
+       hif_dev->fw_data = fw->data;
+       hif_dev->fw_size = fw->size;
 
        /* Proceed with initialization */
 
@@ -1121,6 +1122,8 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
                goto err_htc_hw_init;
        }
 
+       release_firmware(fw);
+       hif_dev->flags |= HIF_USB_READY;
        complete(&hif_dev->fw_done);
 
        return;
@@ -1129,8 +1132,8 @@ err_htc_hw_init:
        ath9k_hif_usb_dev_deinit(hif_dev);
 err_dev_init:
        ath9k_htc_hw_free(hif_dev->htc_handle);
+err_dev_alloc:
        release_firmware(fw);
-       hif_dev->firmware = NULL;
 err_fw:
        ath9k_hif_usb_firmware_fail(hif_dev);
 }
@@ -1277,11 +1280,10 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
 
        wait_for_completion(&hif_dev->fw_done);
 
-       if (hif_dev->firmware) {
+       if (hif_dev->flags & HIF_USB_READY) {
                ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
                ath9k_htc_hw_free(hif_dev->htc_handle);
                ath9k_hif_usb_dev_deinit(hif_dev);
-               release_firmware(hif_dev->firmware);
        }
 
        usb_set_intfdata(interface, NULL);
@@ -1317,13 +1319,23 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
        struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
        struct htc_target *htc_handle = hif_dev->htc_handle;
        int ret;
+       const struct firmware *fw;
 
        ret = ath9k_hif_usb_alloc_urbs(hif_dev);
        if (ret)
                return ret;
 
-       if (hif_dev->firmware) {
+       if (hif_dev->flags & HIF_USB_READY) {
+               /* request cached firmware during suspend/resume cycle */
+               ret = request_firmware(&fw, hif_dev->fw_name,
+                                      &hif_dev->udev->dev);
+               if (ret)
+                       goto fail_resume;
+
+               hif_dev->fw_data = fw->data;
+               hif_dev->fw_size = fw->size;
                ret = ath9k_hif_usb_download_fw(hif_dev);
+               release_firmware(fw);
                if (ret)
                        goto fail_resume;
        } else {
index 487ff658b4c1890f597045c739d919ff6dd16689..51496e74b83eaf3521230421f3a07350c385e3d4 100644 (file)
@@ -85,12 +85,14 @@ struct cmd_buf {
 };
 
 #define HIF_USB_START BIT(0)
+#define HIF_USB_READY BIT(1)
 
 struct hif_device_usb {
        struct usb_device *udev;
        struct usb_interface *interface;
        const struct usb_device_id *usb_device_id;
-       const struct firmware *firmware;
+       const void *fw_data;
+       size_t fw_size;
        struct completion fw_done;
        struct htc_target *htc_handle;
        struct hif_usb_tx tx;
index 936e920fb88e7cc0dfc40766f27b0e7ab1fa7ab3..b30596fcf73a57ed5e63d81d2b465b7e4e4b7f48 100644 (file)
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
 
 int ath9k_tx_init(struct ath9k_htc_priv *priv);
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+                      struct ieee80211_sta *sta,
                       struct sk_buff *skb, u8 slot, bool is_cab);
 void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
 bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
index 77d541feb9102a9af2e8aabfea180da57fc5d317..f42d2eb6af99302f449ef44beac8f0120238e73d 100644 (file)
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
                        goto next;
                }
 
-               ret = ath9k_htc_tx_start(priv, skb, tx_slot, true);
+               ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
                if (ret != 0) {
                        ath9k_htc_tx_clear_slot(priv, tx_slot);
                        dev_kfree_skb_any(skb);
index 07df279c8d467a0ee33bf4c44015a6b2d3f69dbf..0eacfc13c9155feb4af8cb7c4d1e4b1918c0cd39 100644 (file)
@@ -161,7 +161,7 @@ void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
 
        if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                          AR_STOMP_LOW_WLAN_WGHT);
+                                          AR_STOMP_LOW_WLAN_WGHT, 0);
                ath9k_hw_btcoex_enable(ah);
                ath_htc_resume_btcoex_work(priv);
        }
@@ -173,17 +173,26 @@ void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 
        if (ah->btcoex_hw.enabled &&
            ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
-               ath9k_hw_btcoex_disable(ah);
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                        ath_htc_cancel_btcoex_work(priv);
+               ath9k_hw_btcoex_disable(ah);
        }
 }
 
 void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
 {
        struct ath_hw *ah = priv->ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        int qnum;
 
+       /*
+        * Check if BTCOEX is globally disabled.
+        */
+       if (!common->btcoex_enabled) {
+               ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_NONE;
+               return;
+       }
+
        if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
                ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
        }
index a035a380d669b6dcd723e9e3d8aabef70feba79b..d98255eb1b9aa4f1809df6d7c9b1b2d6eb2b141b 100644 (file)
@@ -30,6 +30,10 @@ int htc_modparam_nohwcrypt;
 module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
 
+static int ath9k_htc_btcoex_enable;
+module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
+MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
 #define CHAN2G(_freq, _idx)  { \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
@@ -635,6 +639,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
        common->hw = priv->hw;
        common->priv = priv;
        common->debug_mask = ath9k_debug;
+       common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
 
        spin_lock_init(&priv->beacon_lock);
        spin_lock_init(&priv->tx.tx_lock);
index c785129692ff028db0954cf01fe2296c0a4e46c8..ca78e33ca23ec1393dd6fa9b109a7443cf865cf8 100644 (file)
@@ -489,24 +489,20 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
                memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
-               tsta.is_vif_sta = 0;
                ista->index = sta_idx;
+               tsta.is_vif_sta = 0;
+               maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+                                sta->ht_cap.ampdu_factor);
+               tsta.maxampdu = cpu_to_be16(maxampdu);
        } else {
                memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
                tsta.is_vif_sta = 1;
+               tsta.maxampdu = cpu_to_be16(0xffff);
        }
 
        tsta.sta_index = sta_idx;
        tsta.vif_index = avp->index;
 
-       if (!sta) {
-               tsta.maxampdu = cpu_to_be16(0xffff);
-       } else {
-               maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
-                                sta->ht_cap.ampdu_factor);
-               tsta.maxampdu = cpu_to_be16(maxampdu);
-       }
-
        WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
        if (ret) {
                if (sta)
@@ -856,7 +852,9 @@ set_timer:
 /* mac80211 Callbacks */
 /**********************/
 
-static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +881,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto fail_tx;
        }
 
-       ret = ath9k_htc_tx_start(priv, skb, slot, false);
+       ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
        if (ret != 0) {
                ath_dbg(common, XMIT, "Tx failed\n");
                goto clear_slot;
@@ -1331,6 +1329,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
        return ret;
 }
 
+static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta, u32 changed)
+{
+       struct ath9k_htc_priv *priv = hw->priv;
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_htc_target_rate trate;
+
+       mutex_lock(&priv->mutex);
+       ath9k_htc_ps_wakeup(priv);
+
+       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+               memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+               ath9k_htc_setup_rate(priv, sta, &trate);
+               if (!ath9k_htc_send_rate_cmd(priv, &trate))
+                       ath_dbg(common, CONFIG,
+                               "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+                               sta->addr, be32_to_cpu(trate.capflags));
+               else
+                       ath_dbg(common, CONFIG,
+                               "Unable to update supported rates for sta: %pM\n",
+                               sta->addr);
+       }
+
+       ath9k_htc_ps_restore(priv);
+       mutex_unlock(&priv->mutex);
+}
+
 static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif, u16 queue,
                             const struct ieee80211_tx_queue_params *params)
@@ -1419,7 +1445,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
                                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
                        if (priv->ah->sw_mgmt_crypto &&
                            key->cipher == WLAN_CIPHER_SUITE_CCMP)
-                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
                break;
@@ -1758,6 +1784,7 @@ struct ieee80211_ops ath9k_htc_ops = {
        .sta_add            = ath9k_htc_sta_add,
        .sta_remove         = ath9k_htc_sta_remove,
        .conf_tx            = ath9k_htc_conf_tx,
+       .sta_rc_update      = ath9k_htc_sta_rc_update,
        .bss_info_changed   = ath9k_htc_bss_info_changed,
        .set_key            = ath9k_htc_set_key,
        .get_tsf            = ath9k_htc_get_tsf,
index 47e61d0da33bf1043b02dcc5e1422aa3c83e37cc..06cdcb772d786038b7f1e5219349a6a682b216c9 100644 (file)
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
 }
 
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+                      struct ieee80211_sta *sta,
                       struct sk_buff *skb,
                       u8 slot, bool is_cab)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_vif *vif = tx_info->control.vif;
        struct ath9k_htc_sta *ista;
        struct ath9k_htc_vif *avp = NULL;
index 265bf77598a268c60a4392165db2a12fde95324c..0f2b97f6b7390e32a920e0d645449e820d9d195a 100644 (file)
@@ -78,6 +78,13 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
 }
 
+static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
+                                                       bool enable)
+{
+       if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
+               ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
+}
+
 /* Private hardware call ops */
 
 /* PHY ops */
index 4faf0a3958765bd07ae683fdd6dceb879a5adbfd..f9a6ec5cf4704818a6da783338ada8e3e737c759 100644 (file)
@@ -24,6 +24,7 @@
 #include "rc.h"
 #include "ar9003_mac.h"
 #include "ar9003_mci.h"
+#include "ar9003_phy.h"
 #include "debug.h"
 #include "ath9k.h"
 
@@ -355,7 +356,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                        (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
                ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
 
-               if (AR_SREV_9462(ah))
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                        ah->is_pciexpress = true;
                else
                        ah->is_pciexpress = (val &
@@ -602,6 +603,11 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        if (AR_SREV_9462(ah))
                ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
 
+       if (AR_SREV_9565(ah)) {
+               ah->WARegVal |= AR_WA_BIT22;
+               REG_WRITE(ah, AR_WA, ah->WARegVal);
+       }
+
        ath9k_hw_init_defaults(ah);
        ath9k_hw_init_config(ah);
 
@@ -647,6 +653,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        case AR_SREV_VERSION_9340:
        case AR_SREV_VERSION_9462:
        case AR_SREV_VERSION_9550:
+       case AR_SREV_VERSION_9565:
                break;
        default:
                ath_err(common,
@@ -708,7 +715,7 @@ int ath9k_hw_init(struct ath_hw *ah)
        int ret;
        struct ath_common *common = ath9k_hw_common(ah);
 
-       /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
+       /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */
        switch (ah->hw_version.devid) {
        case AR5416_DEVID_PCI:
        case AR5416_DEVID_PCIE:
@@ -728,6 +735,7 @@ int ath9k_hw_init(struct ath_hw *ah)
        case AR9300_DEVID_AR9580:
        case AR9300_DEVID_AR9462:
        case AR9485_DEVID_AR1111:
+       case AR9300_DEVID_AR9565:
                break;
        default:
                if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -800,8 +808,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 {
        u32 pll;
 
-       if (AR_SREV_9485(ah)) {
-
+       if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
                REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                              AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
@@ -912,7 +919,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
        }
 
        pll = ath9k_hw_compute_pll_control(ah, chan);
-
+       if (AR_SREV_9565(ah))
+               pll |= 0x40000;
        REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
 
        if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
@@ -1726,12 +1734,12 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
        if (!ret)
                goto fail;
 
-       ath9k_hw_loadnf(ah, ah->curchan);
-       ath9k_hw_start_nfcal(ah, true);
-
        if (ath9k_hw_mci_is_enabled(ah))
                ar9003_mci_2g5g_switch(ah, false);
 
+       ath9k_hw_loadnf(ah, ah->curchan);
+       ath9k_hw_start_nfcal(ah, true);
+
        if (AR_SREV_9271(ah))
                ar9002_hw_load_ani_reg(ah, chan);
 
@@ -2018,6 +2026,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        ath9k_hw_apply_gpio_override(ah);
 
+       if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
+               REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
        return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2034,7 +2045,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
 {
        REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
                REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
                REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
@@ -2401,7 +2412,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        if (eeval & AR5416_OPFLAGS_11G)
                pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
 
-       if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
+       if (AR_SREV_9485(ah) ||
+           AR_SREV_9285(ah) ||
+           AR_SREV_9330(ah) ||
+           AR_SREV_9565(ah))
                chip_chainmask = 1;
        else if (AR_SREV_9462(ah))
                chip_chainmask = 3;
@@ -2489,7 +2503,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
-               if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
+               if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah))
                        pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
 
                pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2525,7 +2539,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        }
 
 
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
                /*
                 * enable the diversity-combining algorithm only when
@@ -2568,14 +2582,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                        ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
        }
 
-       if (AR_SREV_9462(ah)) {
-
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
                        pCap->hw_caps |= ATH9K_HW_CAP_MCI;
 
                if (AR_SREV_9462_20(ah))
                        pCap->hw_caps |= ATH9K_HW_CAP_RTT;
-
        }
 
 
@@ -2741,7 +2753,7 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
 
        ENABLE_REGWRITE_BUFFER(ah);
 
-       if (AR_SREV_9462(ah))
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
 
        REG_WRITE(ah, AR_RX_FILTER, bits);
@@ -3038,7 +3050,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
        REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
                    gen_tmr_configuration[timer->index].mode_mask);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                /*
                 * Starting from AR9462, each generic timer can select which tsf
                 * to use. But we still follow the old rule, 0 - 7 use tsf and
@@ -3072,6 +3084,16 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
        REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
                        gen_tmr_configuration[timer->index].mode_mask);
 
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+               /*
+                * Need to switch back to TSF if it was using TSF2.
+                */
+               if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) {
+                       REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+                                   (1 << timer->index));
+               }
+       }
+
        /* Disable both trigger and thresh interrupt masks */
        REG_CLR_BIT(ah, AR_IMR_S5,
                (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
@@ -3153,6 +3175,7 @@ static struct {
        { AR_SREV_VERSION_9485,         "9485" },
        { AR_SREV_VERSION_9462,         "9462" },
        { AR_SREV_VERSION_9550,         "9550" },
+       { AR_SREV_VERSION_9565,         "9565" },
 };
 
 /* For devices with external radios */
index de6968fc64f42920304fe52dd93a493c3ce679c6..566a4ce4f156e8200c29fa4e71da7376a7e1df5d 100644 (file)
@@ -50,6 +50,7 @@
 #define AR9300_DEVID_AR9330    0x0035
 #define AR9300_DEVID_QCA955X   0x0038
 #define AR9485_DEVID_AR1111    0x0037
+#define AR9300_DEVID_AR9565     0x0036
 
 #define AR5416_AR9100_DEVID    0x000b
 
@@ -685,7 +686,7 @@ struct ath_hw_ops {
                        struct ath_hw_antcomb_conf *antconf);
        void (*antdiv_comb_conf_set)(struct ath_hw *ah,
                        struct ath_hw_antcomb_conf *antconf);
-
+       void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
 };
 
 struct ath_nf_limits {
@@ -729,6 +730,7 @@ struct ath_hw {
        bool aspm_enabled;
        bool is_monitoring;
        bool need_an_top2_fixup;
+       bool shared_chain_lnadiv;
        u16 tx_trig_level;
 
        u32 nf_regs[6];
index f33712140fa550aac98bcfac152c60366d597795..fad3ccd5cd91aa8ab5b96603701275303a3f6392 100644 (file)
@@ -46,6 +46,10 @@ static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
+static int ath9k_enable_diversity;
+module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
+MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
+
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -258,7 +262,7 @@ static void setup_ht_cap(struct ath_softc *sc,
        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
 
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
+       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
                max_streams = 1;
        else if (AR_SREV_9462(ah))
                max_streams = 2;
@@ -546,6 +550,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;
+
+       /*
+        * Enable Antenna diversity only when BTCOEX is disabled
+        * and the user manually requests the feature.
+        */
+       if (!common->btcoex_enabled && ath9k_enable_diversity)
+               common->antenna_diversity = 1;
+
        spin_lock_init(&common->cc_lock);
 
        spin_lock_init(&sc->sc_serial_rw);
@@ -597,6 +609,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 
        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
+       ath_fill_led_pin(sc);
 
        if (common->bus_ops->aspm_init)
                common->bus_ops->aspm_init(common);
index a22df749b8db3d8641b4ef8cb78ad99b9e34adbb..31ab82e3ba85fdee932f1c8b2037c262037b1f24 100644 (file)
@@ -696,7 +696,9 @@ mutex_unlock:
        return r;
 }
 
-static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        memset(&txctl, 0, sizeof(struct ath_tx_control));
        txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
+       txctl.sta = control->sta;
 
        ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
 
@@ -983,47 +986,21 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       int ret = 0;
 
-       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_WDS:
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_MESH_POINT:
-               break;
-       default:
-               ath_err(common, "Interface type %d not yet supported\n",
-                       vif->type);
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
-       if (ath9k_uses_beacons(vif->type)) {
-               if (sc->nbcnvifs >= ATH_BCBUF) {
-                       ath_err(common, "Not enough beacon buffers when adding"
-                               " new interface of type: %i\n",
-                               vif->type);
-                       ret = -ENOBUFS;
-                       goto out;
-               }
-       }
-
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
-
        sc->nvifs++;
 
+       ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, vif);
+       ath9k_ps_restore(sc);
+
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
-out:
        mutex_unlock(&sc->mutex);
-       ath9k_ps_restore(sc);
-       return ret;
+       return 0;
 }
 
 static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1033,21 +1010,9 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       int ret = 0;
 
        ath_dbg(common, CONFIG, "Change Interface\n");
-
        mutex_lock(&sc->mutex);
-       ath9k_ps_wakeup(sc);
-
-       if (ath9k_uses_beacons(new_type) &&
-           !ath9k_uses_beacons(vif->type)) {
-               if (sc->nbcnvifs >= ATH_BCBUF) {
-                       ath_err(common, "No beacon slot available\n");
-                       ret = -ENOBUFS;
-                       goto out;
-               }
-       }
 
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
@@ -1055,14 +1020,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        vif->type = new_type;
        vif->p2p = p2p;
 
+       ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, vif);
+       ath9k_ps_restore(sc);
+
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
-out:
-       ath9k_ps_restore(sc);
        mutex_unlock(&sc->mutex);
-       return ret;
+       return 0;
 }
 
 static void ath9k_remove_interface(struct ieee80211_hw *hw,
@@ -1073,7 +1039,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        ath_dbg(common, CONFIG, "Detach Interface\n");
 
-       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        sc->nvifs--;
@@ -1081,10 +1046,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
 
+       ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, NULL);
+       ath9k_ps_restore(sc);
 
        mutex_unlock(&sc->mutex);
-       ath9k_ps_restore(sc);
 }
 
 static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1440,7 +1406,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
                                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
                        if (sc->sc_ah->sw_mgmt_crypto &&
                            key->cipher == WLAN_CIPHER_SUITE_CCMP)
-                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
                break;
@@ -2257,7 +2223,7 @@ static int ath9k_suspend(struct ieee80211_hw *hw,
        mutex_lock(&sc->mutex);
 
        ath_cancel_work(sc);
-       del_timer_sync(&common->ani.timer);
+       ath_stop_ani(sc);
        del_timer_sync(&sc->rx_poll_timer);
 
        if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
index fb536e7e661b630a464700185b5ff845f4e83982..ec2d7c80756753f02e37f30ac3d42b45ec463259 100644 (file)
@@ -80,6 +80,7 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
        struct ath_mci_profile_info *info, *tinfo;
 
        mci->aggr_limit = 0;
+       mci->num_mgmt = 0;
 
        if (list_empty(&mci->info))
                return;
@@ -120,7 +121,14 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
        if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
                goto skip_tuning;
 
+       mci->aggr_limit = 0;
        btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
+       btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+       if (NUM_PROF(mci))
+               btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+       else
+               btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+                                                       ATH_BTCOEX_STOMP_LOW;
 
        if (num_profile == 1) {
                info = list_first_entry(&mci->info,
@@ -132,7 +140,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
                        else if (info->T == 6) {
                                mci->aggr_limit = 6;
                                btcoex->duty_cycle = 30;
-                       }
+                       } else
+                               mci->aggr_limit = 6;
                        ath_dbg(common, MCI,
                                "Single SCO, aggregation limit %d 1/4 ms\n",
                                mci->aggr_limit);
@@ -191,6 +200,23 @@ skip_tuning:
        ath9k_btcoex_timer_resume(sc);
 }
 
+static void ath_mci_wait_btcal_done(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+
+       /* Stop tx & rx */
+       ieee80211_stop_queues(sc->hw);
+       ath_stoprecv(sc);
+       ath_drain_all_txq(sc, false);
+
+       /* Wait for cal done */
+       ar9003_mci_start_reset(ah, ah->curchan);
+
+       /* Resume tx & rx */
+       ath_startrecv(sc);
+       ieee80211_wake_queues(sc->hw);
+}
+
 static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -201,8 +227,8 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
        switch (opcode) {
        case MCI_GPM_BT_CAL_REQ:
                if (mci_hw->bt_state == MCI_BT_AWAKE) {
-                       ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
-                       ath9k_queue_reset(sc, RESET_TYPE_MCI);
+                       mci_hw->bt_state = MCI_BT_CAL_START;
+                       ath_mci_wait_btcal_done(sc);
                }
                ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
                break;
@@ -224,8 +250,8 @@ static void ath9k_mci_work(struct work_struct *work)
        ath_mci_update_scheme(sc);
 }
 
-static void ath_mci_process_profile(struct ath_softc *sc,
-                                   struct ath_mci_profile_info *info)
+static u8 ath_mci_process_profile(struct ath_softc *sc,
+                                 struct ath_mci_profile_info *info)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
@@ -251,25 +277,15 @@ static void ath_mci_process_profile(struct ath_softc *sc,
 
        if (info->start) {
                if (!entry && !ath_mci_add_profile(common, mci, info))
-                       return;
+                       return 0;
        } else
                ath_mci_del_profile(common, mci, entry);
 
-       btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
-       mci->aggr_limit = mci->num_sco ? 6 : 0;
-
-       btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
-       if (NUM_PROF(mci))
-               btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-       else
-               btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
-                                                       ATH_BTCOEX_STOMP_LOW;
-
-       ieee80211_queue_work(sc->hw, &sc->mci_work);
+       return 1;
 }
 
-static void ath_mci_process_status(struct ath_softc *sc,
-                                  struct ath_mci_profile_status *status)
+static u8 ath_mci_process_status(struct ath_softc *sc,
+                                struct ath_mci_profile_status *status)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
@@ -278,14 +294,14 @@ static void ath_mci_process_status(struct ath_softc *sc,
 
        /* Link status type are not handled */
        if (status->is_link)
-               return;
+               return 0;
 
        info.conn_handle = status->conn_handle;
        if (ath_mci_find_profile(mci, &info))
-               return;
+               return 0;
 
        if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
-               return;
+               return 0;
 
        if (status->is_critical)
                __set_bit(status->conn_handle, mci->status);
@@ -299,7 +315,9 @@ static void ath_mci_process_status(struct ath_softc *sc,
        } while (++i < ATH_MCI_MAX_PROFILE);
 
        if (old_num_mgmt != mci->num_mgmt)
-               ieee80211_queue_work(sc->hw, &sc->mci_work);
+               return 1;
+
+       return 0;
 }
 
 static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -308,9 +326,16 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
        struct ath_mci_profile_info profile_info;
        struct ath_mci_profile_status profile_status;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       u8 major, minor;
+       u8 major, minor, update_scheme = 0;
        u32 seq_num;
 
+       if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
+           ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
+               ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
+               ath_mci_flush_profile(&sc->btcoex.mci);
+               ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
+       }
+
        switch (opcode) {
        case MCI_GPM_COEX_VERSION_QUERY:
                ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
@@ -336,7 +361,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
                        break;
                }
 
-               ath_mci_process_profile(sc, &profile_info);
+               update_scheme += ath_mci_process_profile(sc, &profile_info);
                break;
        case MCI_GPM_COEX_BT_STATUS_UPDATE:
                profile_status.is_link = *(rx_payload +
@@ -352,12 +377,14 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
                        profile_status.is_link, profile_status.conn_handle,
                        profile_status.is_critical, seq_num);
 
-               ath_mci_process_status(sc, &profile_status);
+               update_scheme += ath_mci_process_status(sc, &profile_status);
                break;
        default:
                ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
                break;
        }
+       if (update_scheme)
+               ieee80211_queue_work(sc->hw, &sc->mci_work);
 }
 
 int ath_mci_setup(struct ath_softc *sc)
@@ -365,6 +392,7 @@ int ath_mci_setup(struct ath_softc *sc)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_mci_buf *buf = &mci->sched_buf;
+       int ret;
 
        buf->bf_addr = dma_alloc_coherent(sc->dev,
                                  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
@@ -384,9 +412,13 @@ int ath_mci_setup(struct ath_softc *sc)
        mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
        mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
 
-       ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
-                        mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
-                        mci->sched_buf.bf_paddr);
+       ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+                              mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+                              mci->sched_buf.bf_paddr);
+       if (ret) {
+               ath_err(common, "Failed to initialize MCI\n");
+               return ret;
+       }
 
        INIT_WORK(&sc->mci_work, ath9k_mci_work);
        ath_dbg(common, MCI, "MCI Initialized\n");
@@ -551,9 +583,11 @@ void ath_mci_intr(struct ath_softc *sc)
        }
 
        if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
-           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
                mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
                             AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+               ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
+       }
 }
 
 void ath_mci_enable(struct ath_softc *sc)
index ef11dc639461383960fcfa865f7dde01313da1e2..0e630a99b68b8fa729af6007de32e97d16c7f6ff 100644 (file)
@@ -38,6 +38,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
        { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
        { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
+       { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E  AR9565 */
        { 0 }
 };
 
@@ -122,7 +123,8 @@ static void ath_pci_aspm_init(struct ath_common *common)
        if (!parent)
                return;
 
-       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
+       if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
+           (AR_SREV_9285(ah))) {
                /* Bluetooth coexistance requires disabling ASPM. */
                pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
                        PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
index e034add9cd5a478a2dd085f5133a18898913b932..27ed80b5488133175a0b348a3f9752ebe69ed016 100644 (file)
@@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
        8, /* MCS start */
        {
                [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */
+                       5400, 0, 12 }, /* 6 Mb */
                [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
-                       7800,  1, 18, 0, 1, 1, 1 }, /* 9 Mb */
+                       7800,  1, 18 }, /* 9 Mb */
                [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */
+                       10000, 2, 24 }, /* 12 Mb */
                [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */
+                       13900, 3, 36 }, /* 18 Mb */
                [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */
+                       17300, 4, 48 }, /* 24 Mb */
                [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */
+                       23000, 5, 72 }, /* 36 Mb */
                [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */
+                       27400, 6, 96 }, /* 48 Mb */
                [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */
+                       29300, 7, 108 }, /* 54 Mb */
                [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */
+                       6400, 0, 0 }, /* 6.5 Mb */
                [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */
+                       12700, 1, 1 }, /* 13 Mb */
                [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */
+                       18800, 2, 2 }, /* 19.5 Mb */
                [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */
+                       25000, 3, 3 }, /* 26 Mb */
                [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */
+                       36700, 4, 4 }, /* 39 Mb */
                [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */
+                       48100, 5, 5 }, /* 52 Mb */
                [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */
+                       53500, 6, 6 }, /* 58.5 Mb */
                [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */
+                       59000, 7, 7 }, /* 65 Mb */
                [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */
+                       65400, 7, 7 }, /* 75 Mb */
                [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */
+                       12700, 8, 8 }, /* 13 Mb */
                [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */
+                       24800, 9, 9 }, /* 26 Mb */
                [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */
+                       36600, 10, 10 }, /* 39 Mb */
                [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */
+                       48100, 11, 11 }, /* 52 Mb */
                [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */
+                       69500, 12, 12 }, /* 78 Mb */
                [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */
+                       89500, 13, 13 }, /* 104 Mb */
                [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */
+                       98900, 14, 14 }, /* 117 Mb */
                [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */
+                       108300, 15, 15 }, /* 130 Mb */
                [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */
+                       120000, 15, 15 }, /* 144.4 Mb */
                [26] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */
+                       17400, 16, 16 }, /* 19.5 Mb */
                [27] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */
+                       35100, 17, 17 }, /* 39 Mb */
                [28] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */
+                       52600, 18, 18 }, /* 58.5 Mb */
                [29] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */
+                       70400, 19, 19 }, /* 78 Mb */
                [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */
+                       104900, 20, 20 }, /* 117 Mb */
                [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/
+                       115800, 20, 20 }, /* 130 Mb*/
                [32] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */
+                       137200, 21, 21 }, /* 156 Mb */
                [33] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */
+                       151100, 21, 21 }, /* 173.3 Mb */
                [34] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */
+                       152800, 22, 22 }, /* 175.5 Mb */
                [35] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/
+                       168400, 22, 22 }, /* 195 Mb*/
                [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */
+                       168400, 23, 23 }, /* 195 Mb */
                [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */
+                       185000, 23, 23 }, /* 216.7 Mb */
                [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/
+                       13200, 0, 0 }, /* 13.5 Mb*/
                [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/
+                       25900, 1, 1 }, /* 27.0 Mb*/
                [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/
+                       38600, 2, 2 }, /* 40.5 Mb*/
                [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */
+                       49800, 3, 3 }, /* 54 Mb */
                [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */
+                       72200, 4, 4 }, /* 81 Mb */
                [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */
+                       92900, 5, 5 }, /* 108 Mb */
                [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/
+                       102700, 6, 6 }, /* 121.5 Mb*/
                [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */
+                       112000, 7, 7 }, /* 135 Mb */
                [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */
+                       122000, 7, 7 }, /* 150 Mb */
                [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */
+                       25800, 8, 8 }, /* 27 Mb */
                [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */
+                       49800, 9, 9 }, /* 54 Mb */
                [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */
+                       71900, 10, 10 }, /* 81 Mb */
                [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */
+                       92500, 11, 11 }, /* 108 Mb */
                [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */
+                       130300, 12, 12 }, /* 162 Mb */
                [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */
+                       162800, 13, 13 }, /* 216 Mb */
                [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */
+                       178200, 14, 14 }, /* 243 Mb */
                [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */
+                       192100, 15, 15 }, /* 270 Mb */
                [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */
+                       207000, 15, 15 }, /* 300 Mb */
                [56] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */
+                       36100, 16, 16 }, /* 40.5 Mb */
                [57] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */
+                       72900, 17, 17 }, /* 81 Mb */
                [58] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */
+                       108300, 18, 18 }, /* 121.5 Mb */
                [59] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19, 4, 59, 59, 59 }, /*  162 Mb */
+                       142000, 19, 19 }, /*  162 Mb */
                [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20, 4, 60, 61, 61 }, /*  243 Mb */
+                       205100, 20, 20 }, /*  243 Mb */
                [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20, 4, 60, 61, 61 }, /*  270 Mb */
+                       224700, 20, 20 }, /*  270 Mb */
                [62] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21, 4, 62, 63, 63 }, /*  324 Mb */
+                       263100, 21, 21 }, /*  324 Mb */
                [63] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21, 4, 62, 63, 63 }, /*  360 Mb */
+                       288000, 21, 21 }, /*  360 Mb */
                [64] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */
+                       290700, 22, 22 }, /* 364.5 Mb */
                [65] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */
+                       317200, 22, 22 }, /* 405 Mb */
                [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */
+                       317200, 23, 23 }, /* 405 Mb */
                [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */
+                       346400, 23, 23 }, /* 450 Mb */
        },
        50,  /* probe interval */
        WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
        12, /* MCS start */
        {
                [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
-                       900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */
+                       900, 0, 2 }, /* 1 Mb */
                [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
-                       1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */
+                       1900, 1, 4 }, /* 2 Mb */
                [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
-                       4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */
+                       4900, 2, 11 }, /* 5.5 Mb */
                [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
-                       8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */
+                       8100, 3, 22 }, /* 11 Mb */
                [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */
+                       5400, 4, 12 }, /* 6 Mb */
                [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
-                       7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */
+                       7800, 5, 18 }, /* 9 Mb */
                [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */
+                       10100, 6, 24 }, /* 12 Mb */
                [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */
+                       14100, 7, 36 }, /* 18 Mb */
                [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */
+                       17700, 8, 48 }, /* 24 Mb */
                [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */
+                       23700, 9, 72 }, /* 36 Mb */
                [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */
+                       27400, 10, 96 }, /* 48 Mb */
                [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */
+                       30900, 11, 108 }, /* 54 Mb */
                [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */
+                       6400, 0, 0 }, /* 6.5 Mb */
                [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */
+                       12700, 1, 1 }, /* 13 Mb */
                [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/
+                       18800, 2, 2 }, /* 19.5 Mb*/
                [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */
+                       25000, 3, 3 }, /* 26 Mb */
                [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */
+                       36700, 4, 4 }, /* 39 Mb */
                [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */
+                       48100, 5, 5 }, /* 52 Mb */
                [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */
+                       53500, 6, 6 }, /* 58.5 Mb */
                [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */
+                       59000, 7, 7 }, /* 65 Mb */
                [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/
+                       65400, 7, 7 }, /* 65 Mb*/
                [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */
+                       12700, 8, 8 }, /* 13 Mb */
                [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */
+                       24800, 9, 9 }, /* 26 Mb */
                [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */
+                       36600, 10, 10 }, /* 39 Mb */
                [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */
+                       48100, 11, 11 }, /* 52 Mb */
                [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */
+                       69500, 12, 12 }, /* 78 Mb */
                [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */
+                       89500, 13, 13 }, /* 104 Mb */
                [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */
+                       98900, 14, 14 }, /* 117 Mb */
                [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */
+                       108300, 15, 15 }, /* 130 Mb */
                [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */
+                       120000, 15, 15 }, /* 144.4 Mb */
                [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */
+                       17400, 16, 16 }, /* 19.5 Mb */
                [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */
+                       35100, 17, 17 }, /* 39 Mb */
                [32] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */
+                       52600, 18, 18 }, /* 58.5 Mb */
                [33] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */
+                       70400, 19, 19 }, /* 78 Mb */
                [34] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */
+                       104900, 20, 20 }, /* 117 Mb */
                [35] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */
+                       115800, 20, 20 }, /* 130 Mb */
                [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */
+                       137200, 21, 21 }, /* 156 Mb */
                [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */
+                       151100, 21, 21 }, /* 173.3 Mb */
                [38] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */
+                       152800, 22, 22 }, /* 175.5 Mb */
                [39] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */
+                       168400, 22, 22 }, /* 195 Mb */
                [40] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */
+                       168400, 23, 23 }, /* 195 Mb */
                [41] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */
+                       185000, 23, 23 }, /* 216.7 Mb */
                [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */
+                       13200, 0, 0 }, /* 13.5 Mb */
                [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */
+                       25900, 1, 1 }, /* 27.0 Mb */
                [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */
+                       38600, 2, 2 }, /* 40.5 Mb */
                [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */
+                       49800, 3, 3 }, /* 54 Mb */
                [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */
+                       72200, 4, 4 }, /* 81 Mb */
                [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */
+                       92900, 5, 5 }, /* 108 Mb */
                [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */
+                       102700, 6, 6 }, /* 121.5 Mb */
                [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */
+                       112000, 7, 7 }, /* 135 Mb */
                [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */
+                       122000, 7, 7 }, /* 150 Mb */
                [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */
+                       25800, 8, 8 }, /* 27 Mb */
                [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */
+                       49800, 9, 9 }, /* 54 Mb */
                [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */
+                       71900, 10, 10 }, /* 81 Mb */
                [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */
+                       92500, 11, 11 }, /* 108 Mb */
                [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */
+                       130300, 12, 12 }, /* 162 Mb */
                [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */
+                       162800, 13, 13 }, /* 216 Mb */
                [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */
+                       178200, 14, 14 }, /* 243 Mb */
                [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */
+                       192100, 15, 15 }, /* 270 Mb */
                [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */
+                       207000, 15, 15 }, /* 300 Mb */
                [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */
+                       36100, 16, 16 }, /* 40.5 Mb */
                [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */
+                       72900, 17, 17 }, /* 81 Mb */
                [62] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */
+                       108300, 18, 18 }, /* 121.5 Mb */
                [63] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */
+                       142000, 19, 19 }, /* 162 Mb */
                [64] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
+                       205100, 20, 20 }, /* 243 Mb */
                [65] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */
+                       224700, 20, 20 }, /* 270 Mb */
                [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
+                       263100, 21, 21 }, /* 324 Mb */
                [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */
+                       288000, 21, 21 }, /* 360 Mb */
                [68] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */
+                       290700, 22, 22 }, /* 364.5 Mb */
                [69] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */
+                       317200, 22, 22 }, /* 405 Mb */
                [70] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */
+                       317200, 23, 23 }, /* 405 Mb */
                [71] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */
+                       346400, 23, 23 }, /* 450 Mb */
        },
        50,  /* probe interval */
        WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
        0,
        {
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 0, 12, 0},
+                       5400, 0, 12},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800,  1, 18, 0},
+                       7800,  1, 18},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 2, 24, 2},
+                       10000, 2, 24},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 3, 36, 2},
+                       13900, 3, 36},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 4, 48, 4},
+                       17300, 4, 48},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 5, 72, 4},
+                       23000, 5, 72},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 6, 96, 4},
+                       27400, 6, 96},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 7, 108, 4},
+                       29300, 7, 108},
        },
        50,  /* probe interval */
        0,   /* Phy rates allowed initially */
@@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
        0,
        {
                { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-                       900, 0, 2, 0},
+                       900, 0, 2},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-                       1900, 1, 4, 1},
+                       1900, 1, 4},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-                       4900, 2, 11, 2},
+                       4900, 2, 11},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-                       8100, 3, 22, 3},
+                       8100, 3, 22},
                { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 4, 12, 4},
+                       5400, 4, 12},
                { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800, 5, 18, 4},
+                       7800, 5, 18},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 6, 24, 6},
+                       10000, 6, 24},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 7, 36, 6},
+                       13900, 7, 36},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 8, 48, 8},
+                       17300, 8, 48},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 9, 72, 8},
+                       23000, 9, 72},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 10, 96, 8},
+                       27400, 10, 96},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 11, 108, 8},
+                       29300, 11, 108},
        },
        50,  /* probe interval */
        0,   /* Phy rates allowed initially */
 };
 
-static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
+static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
                                struct ieee80211_tx_rate *rate)
 {
-       int rix = 0, i = 0;
-       static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+       int rix, i, idx = 0;
 
        if (!(rate->flags & IEEE80211_TX_RC_MCS))
                return rate->idx;
 
-       while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
-               rix++; i++;
+       for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
+               idx = ath_rc_priv->valid_rate_index[i];
+
+               if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
+                   rate_table->info[idx].ratecode == rate->idx)
+                       break;
        }
 
-       rix += rate->idx + rate_table->mcs_start;
+       rix = idx;
 
-       if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
-           (rate->flags & IEEE80211_TX_RC_SHORT_GI))
-               rix = rate_table->info[rix].ht_index;
-       else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-               rix = rate_table->info[rix].sgi_index;
-       else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-               rix = rate_table->info[rix].cw40index;
+       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+               rix++;
 
        return rix;
 }
 
-static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
-                                  struct ath_rate_priv *ath_rc_priv)
+static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u8 i, j, idx, idx_next;
 
        for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
@@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
        }
 }
 
-static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
-{
-       u8 i;
-
-       for (i = 0; i < ath_rc_priv->rate_table_size; i++)
-               ath_rc_priv->valid_rate_index[i] = 0;
-}
-
-static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
-                                          u8 index, int valid_tx_rate)
-{
-       BUG_ON(index > ath_rc_priv->rate_table_size);
-       ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
-}
-
 static inline
 int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
                                struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
 }
 
 static inline int
-ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
-                    struct ath_rate_priv *ath_rc_priv,
+ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
                     u8 cur_valid_txrate, u8 *next_idx)
 {
        int8_t i;
@@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
        return 0;
 }
 
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
-                                const struct ath_rate_table *rate_table,
-                                u32 capflag)
+static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u8 i, hi = 0;
 
        for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
                        u32 phy = rate_table->info[i].phy;
                        u8 valid_rate_count = 0;
 
-                       if (!ath_rc_valid_phyrate(phy, capflag, 0))
+                       if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
                                continue;
 
                        valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
 
                        ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
                        ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
+                       ath_rc_priv->valid_rate_index[i] = true;
                        hi = i;
                }
        }
@@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
        return hi;
 }
 
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
-                               const struct ath_rate_table *rate_table,
-                               struct ath_rateset *rateset,
-                               u32 capflag)
+static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
+                                      u32 phy, u32 capflag)
 {
-       u8 i, j, hi = 0;
+       if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
+               return false;
 
-       /* Use intersection of working rates and valid rates */
-       for (i = 0; i < rateset->rs_nrates; i++) {
-               for (j = 0; j < rate_table->rate_cnt; j++) {
-                       u32 phy = rate_table->info[j].phy;
-                       u16 rate_flags = rate_table->info[j].rate_flags;
-                       u8 rate = rateset->rs_rates[i];
-                       u8 dot11rate = rate_table->info[j].dot11rate;
-
-                       /* We allow a rate only if its valid and the
-                        * capflag matches one of the validity
-                        * (VALID/VALID_20/VALID_40) flags */
-
-                       if ((rate == dot11rate) &&
-                           (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
-                           WLAN_RC_CAP_MODE(capflag) &&
-                           (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
-                           !WLAN_RC_PHY_HT(phy)) {
-                               u8 valid_rate_count = 0;
-
-                               if (!ath_rc_valid_phyrate(phy, capflag, 0))
-                                       continue;
-
-                               valid_rate_count =
-                                       ath_rc_priv->valid_phy_ratecnt[phy];
-
-                               ath_rc_priv->valid_phy_rateidx[phy]
-                                       [valid_rate_count] = j;
-                               ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                               ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
-                               hi = max(hi, j);
-                       }
-               }
-       }
+       if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
+               return false;
 
-       return hi;
+       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+               return false;
+
+       return true;
 }
 
-static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
-                                 const struct ath_rate_table *rate_table,
-                                 struct ath_rateset *rateset, u32 capflag)
+static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
+                                  u32 phy, u32 capflag)
 {
-       u8 i, j, hi = 0;
+       if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
+               return false;
+
+       if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+               return false;
+
+       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+               return false;
+
+       return true;
+}
+
+static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
+{
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+       struct ath_rateset *rateset;
+       u32 phy, capflag = ath_rc_priv->ht_cap;
+       u16 rate_flags;
+       u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
+
+       if (legacy)
+               rateset = &ath_rc_priv->neg_rates;
+       else
+               rateset = &ath_rc_priv->neg_ht_rates;
 
-       /* Use intersection of working rates and valid rates */
        for (i = 0; i < rateset->rs_nrates; i++) {
                for (j = 0; j < rate_table->rate_cnt; j++) {
-                       u32 phy = rate_table->info[j].phy;
-                       u16 rate_flags = rate_table->info[j].rate_flags;
-                       u8 rate = rateset->rs_rates[i];
-                       u8 dot11rate = rate_table->info[j].dot11rate;
-
-                       if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
-                           !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) ||
-                           !WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+                       phy = rate_table->info[j].phy;
+                       rate_flags = rate_table->info[j].rate_flags;
+                       rate = rateset->rs_rates[i];
+                       dot11rate = rate_table->info[j].dot11rate;
+
+                       if (legacy &&
+                           !ath_rc_check_legacy(rate, dot11rate,
+                                                rate_flags, phy, capflag))
+                               continue;
+
+                       if (!legacy &&
+                           !ath_rc_check_ht(rate, dot11rate,
+                                            rate_flags, phy, capflag))
                                continue;
 
                        if (!ath_rc_valid_phyrate(phy, capflag, 0))
                                continue;
 
-                       ath_rc_priv->valid_phy_rateidx[phy]
-                               [ath_rc_priv->valid_phy_ratecnt[phy]] = j;
+                       valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
+                       ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
                        ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
+                       ath_rc_priv->valid_rate_index[j] = true;
                        hi = max(hi, j);
                }
        }
@@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
        return hi;
 }
 
-/* Finds the highest rate index we can use */
-static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
-                                struct ath_rate_priv *ath_rc_priv,
-                                const struct ath_rate_table *rate_table,
-                                int *is_probing,
-                                bool legacy)
+static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
+                                int *is_probing)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u32 best_thruput, this_thruput, now_msec;
        u8 rate, next_rate, best_rate, maxindex, minindex;
        int8_t index = 0;
@@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
                u8 per_thres;
 
                rate = ath_rc_priv->valid_rate_index[index];
-               if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
-                       continue;
                if (rate > ath_rc_priv->rate_max_phy)
                        continue;
 
@@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
        rate->count = tries;
        rate->idx = rate_table->info[rix].ratecode;
 
-       if (txrc->short_preamble)
-               rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
        if (txrc->rts || rtsctsenable)
                rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
 
@@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
                                   const struct ath_rate_table *rate_table,
                                   struct ieee80211_tx_info *tx_info)
 {
-       struct ieee80211_tx_rate *rates = tx_info->control.rates;
-       int i = 0, rix = 0, cix, enable_g_protection = 0;
+       struct ieee80211_bss_conf *bss_conf;
 
-       /* get the cix for the lowest valid rix */
-       for (i = 3; i >= 0; i--) {
-               if (rates[i].count && (rates[i].idx >= 0)) {
-                       rix = ath_rc_get_rateindex(rate_table, &rates[i]);
-                       break;
-               }
-       }
-       cix = rate_table->info[rix].ctrl_rate;
+       if (!tx_info->control.vif)
+               return;
+       /*
+        * For legacy frames, mac80211 takes care of CTS protection.
+        */
+       if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
+               return;
 
-       /* All protection frames are transmited at 2Mb/s for 802.11g,
-        * otherwise we transmit them at 1Mb/s */
-       if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
-           !conf_is_ht(&sc->hw->conf))
-               enable_g_protection = 1;
+       bss_conf = &tx_info->control.vif->bss_conf;
+
+       if (!bss_conf->basic_rates)
+               return;
 
        /*
-        * If 802.11g protection is enabled, determine whether to use RTS/CTS or
-        * just CTS.  Note that this is only done for OFDM/HT unicast frames.
+        * For now, use the lowest allowed basic rate for HT frames.
         */
-       if ((tx_info->control.vif &&
-            tx_info->control.vif->bss_conf.use_cts_prot) &&
-           (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
-            WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
-               rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
-               cix = rate_table->info[enable_g_protection].ctrl_rate;
-       }
-
-       tx_info->control.rts_cts_rate_idx = cix;
+       tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
 }
 
 static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
@@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        try_per_rate = 4;
 
        rate_table = ath_rc_priv->rate_table;
-       rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-                                    &is_probe, false);
+       rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
 
-       /*
-        * If we're in HT mode and both us and our peer supports LDPC.
-        * We don't need to check our own device's capabilities as our own
-        * ht capabilities would have already been intersected with our peer's.
-        */
        if (conf_is_ht(&sc->hw->conf) &&
            (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
                tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
 
        if (is_probe) {
-               /* set one try for probe rates. For the
-                * probes don't enable rts */
+               /*
+                * Set one try for probe rates. For the
+                * probes don't enable RTS.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       1, rix, 0);
-
-               /* Get the next tried/allowed rate. No RTS for the next series
-                * after the probe rate
+               /*
+                * Get the next tried/allowed rate.
+                * No RTS for the next series after the probe rate.
                 */
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
 
                tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
-               /* Set the chosen rate. No RTS for first series entry. */
+               /*
+                * Set the chosen rate. No RTS for first series entry.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
        }
 
-       /* Fill in the other rates for multirate retry */
-       for ( ; i < 3; i++) {
+       for ( ; i < 4; i++) {
+               /*
+                * Use twice the number of tries for the last MRR segment.
+                */
+               if (i + 1 == 4)
+                       try_per_rate = 8;
+
+               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
 
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-               /* All other rates in the series have RTS enabled */
+               /*
+                * All other rates in the series have RTS enabled.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i], txrc,
                                       try_per_rate, rix, 1);
        }
 
-       /* Use twice the number of tries for the last MRR segment. */
-       try_per_rate = 8;
-
-       /*
-        * If the last rate in the rate series is MCS and has
-        * more than 80% of per thresh, then use a legacy rate
-        * as last retry to ensure that the frame is tried in both
-        * MCS and legacy rate.
-        */
-       ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-       if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
-           (ath_rc_priv->per[rix] > 45))
-               rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-                               &is_probe, true);
-
-       /* All other rates in the series have RTS enabled */
-       ath_rc_rate_set_series(rate_table, &rates[i], txrc,
-                              try_per_rate, rix, 1);
        /*
         * NB:Change rate series to enable aggregation when operating
         * at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                rates[0].count = ATH_TXMAXTRY;
        }
 
-       /* Setup RTS/CTS */
        ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
 }
 
@@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
        stats->per = per;
 }
 
-/* Update PER, RSSI and whatever else that the code thinks it is doing.
-   If you can make sense of all this, you really need to go out more. */
-
 static void ath_rc_update_ht(struct ath_softc *sc,
                             struct ath_rate_priv *ath_rc_priv,
                             struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
        if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
            rate_table->info[tx_rate].ratekbps <=
            rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv,
-                                    (u8)tx_rate, &ath_rc_priv->rate_max_phy);
+               ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
+                                    &ath_rc_priv->rate_max_phy);
 
                /* Don't probe for a little while. */
                ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc,
 
 }
 
+static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+       struct ath_rc_stats *stats;
+
+       stats = &rc->rcstats[final_rate];
+       stats->success++;
+}
 
 static void ath_rc_tx_status(struct ath_softc *sc,
                             struct ath_rate_priv *ath_rc_priv,
-                            struct ieee80211_tx_info *tx_info,
-                            int final_ts_idx, int xretries, int long_retry)
+                            struct sk_buff *skb)
 {
-       const struct ath_rate_table *rate_table;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rates = tx_info->status.rates;
+       struct ieee80211_tx_rate *rate;
+       int final_ts_idx = 0, xretries = 0, long_retry = 0;
        u8 flags;
        u32 i = 0, rix;
 
-       rate_table = ath_rc_priv->rate_table;
+       for (i = 0; i < sc->hw->max_rates; i++) {
+               rate = &tx_info->status.rates[i];
+               if (rate->idx < 0 || !rate->count)
+                       break;
+
+               final_ts_idx = i;
+               long_retry = rate->count - 1;
+       }
+
+       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
+               xretries = 1;
 
        /*
         * If the first rate is not the final index, there
         * are intermediate rate failures to be processed.
         */
        if (final_ts_idx != 0) {
-               /* Process intermediate rates that failed.*/
                for (i = 0; i < final_ts_idx ; i++) {
                        if (rates[i].count != 0 && (rates[i].idx >= 0)) {
                                flags = rates[i].flags;
@@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc,
                                    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
                                        return;
 
-                               rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+                               rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
                                ath_rc_update_ht(sc, ath_rc_priv, tx_info,
-                                               rix, xretries ? 1 : 2,
-                                               rates[i].count);
+                                                rix, xretries ? 1 : 2,
+                                                rates[i].count);
                        }
                }
-       } else {
-               /*
-                * Handle the special case of MIMO PS burst, where the second
-                * aggregate is sent out with only one rate and one try.
-                * Treating it as an excessive retry penalizes the rate
-                * inordinately.
-                */
-               if (rates[0].count == 1 && xretries == 1)
-                       xretries = 2;
        }
 
-       flags = rates[i].flags;
+       flags = rates[final_ts_idx].flags;
 
        /* If HT40 and we have switched mode from 40 to 20 => don't update */
        if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
            !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
                return;
 
-       rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+       rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
        ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
+       ath_debug_stat_rc(ath_rc_priv, rix);
 }
 
 static const
@@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
                                             enum ieee80211_band band,
                                             bool is_ht)
 {
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
        switch(band) {
        case IEEE80211_BAND_2GHZ:
                if (is_ht)
@@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
                        return &ar5416_11na_ratetable;
                return &ar5416_11a_ratetable;
        default:
-               ath_dbg(common, CONFIG, "Invalid band\n");
                return NULL;
        }
 }
 
 static void ath_rc_init(struct ath_softc *sc,
-                       struct ath_rate_priv *ath_rc_priv,
-                       struct ieee80211_supported_band *sband,
-                       struct ieee80211_sta *sta,
-                       const struct ath_rate_table *rate_table)
+                       struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
        u8 i, j, k, hi = 0, hthi = 0;
 
-       /* Initial rate table size. Will change depending
-        * on the working rate set */
        ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
 
-       /* Initialize thresholds according to the global rate table */
        for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
                ath_rc_priv->per[i] = 0;
+               ath_rc_priv->valid_rate_index[i] = 0;
        }
 
-       /* Determine the valid rates */
-       ath_rc_init_valid_rate_idx(ath_rc_priv);
-
        for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
                for (j = 0; j < RATE_TABLE_SIZE; j++)
                        ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc,
        }
 
        if (!rateset->rs_nrates) {
-               /* No working rate, just initialize valid rates */
-               hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
-                                           ath_rc_priv->ht_cap);
+               hi = ath_rc_init_validrates(ath_rc_priv);
        } else {
-               /* Use intersection of working rates and valid rates */
-               hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
-                                          rateset, ath_rc_priv->ht_cap);
-               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
-                       hthi = ath_rc_setvalid_htrates(ath_rc_priv,
-                                                      rate_table,
-                                                      ht_mcs,
-                                                      ath_rc_priv->ht_cap);
-               }
+               hi = ath_rc_setvalid_rates(ath_rc_priv, true);
+
+               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
+                       hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
+
                hi = max(hi, hthi);
        }
 
        ath_rc_priv->rate_table_size = hi + 1;
        ath_rc_priv->rate_max_phy = 0;
-       BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
 
        for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
                for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc,
                                ath_rc_priv->valid_phy_rateidx[i][j];
                }
 
-               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1)
-                   || !ath_rc_priv->valid_phy_ratecnt[i])
+               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
+                   !ath_rc_priv->valid_phy_ratecnt[i])
                        continue;
 
                ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
        }
-       BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-       BUG_ON(k > RATE_TABLE_SIZE);
+       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+       WARN_ON(k > RATE_TABLE_SIZE);
 
        ath_rc_priv->max_valid_rate = k;
-       ath_rc_sort_validrates(rate_table, ath_rc_priv);
+       ath_rc_sort_validrates(ath_rc_priv);
        ath_rc_priv->rate_max_phy = (k > 4) ?
-                                       ath_rc_priv->valid_rate_index[k-4] :
-                                       ath_rc_priv->valid_rate_index[k-1];
-       ath_rc_priv->rate_table = rate_table;
+               ath_rc_priv->valid_rate_index[k-4] :
+               ath_rc_priv->valid_rate_index[k-1];
 
        ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
                ath_rc_priv->ht_cap);
 }
 
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
-                              bool is_cw40, bool is_sgi)
+static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
        u8 caps = 0;
 
@@ -1289,10 +1222,14 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
                        caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
                else if (sta->ht_cap.mcs.rx_mask[1])
                        caps |= WLAN_RC_DS_FLAG;
-               if (is_cw40)
+               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
                        caps |= WLAN_RC_40_FLAG;
-               if (is_sgi)
-                       caps |= WLAN_RC_SGI_FLAG;
+                       if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+                               caps |= WLAN_RC_SGI_FLAG;
+               } else {
+                       if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+                               caps |= WLAN_RC_SGI_FLAG;
+               }
        }
 
        return caps;
@@ -1319,15 +1256,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
 /* mac80211 Rate Control callbacks */
 /***********************************/
 
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-       struct ath_rc_stats *stats;
-
-       stats = &rc->rcstats[final_rate];
-       stats->success++;
-}
-
-
 static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
                          struct ieee80211_sta *sta, void *priv_sta,
                          struct sk_buff *skb)
@@ -1335,22 +1263,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
        struct ath_softc *sc = priv;
        struct ath_rate_priv *ath_rc_priv = priv_sta;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr;
-       int final_ts_idx = 0, tx_status = 0;
-       int long_retry = 0;
-       __le16 fc;
-       int i;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-       fc = hdr->frame_control;
-       for (i = 0; i < sc->hw->max_rates; i++) {
-               struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
-               if (rate->idx < 0 || !rate->count)
-                       break;
-
-               final_ts_idx = i;
-               long_retry = rate->count - 1;
-       }
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       __le16 fc = hdr->frame_control;
 
        if (!priv_sta || !ieee80211_is_data(fc))
                return;
@@ -1363,11 +1277,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
        if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
                return;
 
-       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
-               tx_status = 1;
-
-       ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
-                        long_retry);
+       ath_rc_tx_status(sc, ath_rc_priv, skb);
 
        /* Check if aggregation has to be enabled for this tid */
        if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1293,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
                                ieee80211_start_tx_ba_session(sta, tid, 0);
                }
        }
-
-       ath_debug_stat_rc(ath_rc_priv,
-               ath_rc_get_rateindex(ath_rc_priv->rate_table,
-                       &tx_info->status.rates[final_ts_idx]));
 }
 
 static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                           struct ieee80211_sta *sta, void *priv_sta)
 {
        struct ath_softc *sc = priv;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table;
-       bool is_cw40, is_sgi = false;
        int i, j = 0;
 
        for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1322,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                ath_rc_priv->neg_ht_rates.rs_nrates = j;
        }
 
-       is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
-       if (is_cw40)
-               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
-       else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-
-       /* Choose rate table first */
-
-       rate_table = ath_choose_rate_table(sc, sband->band,
-                             sta->ht_cap.ht_supported);
+       ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
+                                                       sta->ht_cap.ht_supported);
+       if (!ath_rc_priv->rate_table) {
+               ath_err(common, "No rate table chosen\n");
+               return;
+       }
 
-       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
-       ath_rc_init(sc, priv_sta, sband, sta, rate_table);
+       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+       ath_rc_init(sc, priv_sta);
 }
 
 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1339,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
 {
        struct ath_softc *sc = priv;
        struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table = NULL;
-       bool oper_cw40 = false, oper_sgi;
-       bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
-       bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
-
-       /* FIXME: Handle AP mode later when we support CWM */
 
        if (changed & IEEE80211_RC_BW_CHANGED) {
-               if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
-                       return;
-
-               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                       oper_cw40 = true;
-
-               if (oper_cw40)
-                       oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
-                                  true : false;
-               else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-                       oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
-                                  true : false;
-               else
-                       oper_sgi = false;
-
-               if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
-                       rate_table = ath_choose_rate_table(sc, sband->band,
-                                                  sta->ht_cap.ht_supported);
-                       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
-                                                  oper_cw40, oper_sgi);
-                       ath_rc_init(sc, priv_sta, sband, sta, rate_table);
-
-                       ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
-                               "Operating HT Bandwidth changed to: %d\n",
-                               sc->hw->conf.channel_type);
-               }
+               ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+               ath_rc_init(sc, priv_sta);
+
+               ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
+                       "Operating HT Bandwidth changed to: %d\n",
+                       sc->hw->conf.channel_type);
        }
 }
 
@@ -1484,7 +1358,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
        struct ath_rate_priv *rc = file->private_data;
        char *buf;
        unsigned int len = 0, max;
-       int i = 0;
+       int rix;
        ssize_t retval;
 
        if (rc->rate_table == NULL)
@@ -1500,7 +1374,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
                       "HT", "MCS", "Rate",
                       "Success", "Retries", "XRetries", "PER");
 
-       for (i = 0; i < rc->rate_table_size; i++) {
+       for (rix = 0; rix < rc->max_valid_rate; rix++) {
+               u8 i = rc->valid_rate_index[rix];
                u32 ratekbps = rc->rate_table->info[i].ratekbps;
                struct ath_rc_stats *stats = &rc->rcstats[i];
                char mcs[5];
index 75f8e9b06b2859d2866f16653c3a22913ee6baf5..268e67dc5fb2d945a26a332081b32167d9e133dc 100644 (file)
@@ -160,10 +160,6 @@ struct ath_rate_table {
                u32 user_ratekbps;
                u8 ratecode;
                u8 dot11rate;
-               u8 ctrl_rate;
-               u8 cw40index;
-               u8 sgi_index;
-               u8 ht_index;
        } info[RATE_TABLE_SIZE];
        u32 probe_interval;
        u8 initial_ratemax;
index 4480c0cc655f6f6ffca73774b2178e748936155e..83d16e7ed27239bfa4734a2b1b66e98b2800dbf4 100644 (file)
@@ -424,8 +424,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;
 
        if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
-               /* The following may also be needed for other older chips */
-               if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
+               /* This is needed for older chips */
+               if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }
index 87cac8eb78349f1abb6321f4d1f552769726bb20..4e6760f8596d2dc07543ab36d419c1977c3c95ec 100644 (file)
 #define AR_SREV_REVISION_9580_10       4 /* AR9580 1.0 */
 #define AR_SREV_VERSION_9462           0x280
 #define AR_SREV_REVISION_9462_20       2
+#define AR_SREV_VERSION_9565            0x2C0
+#define AR_SREV_REVISION_9565_10        0
 #define AR_SREV_VERSION_9550           0x400
 
 #define AR_SREV_5416(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
        ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
 
+#define AR_SREV_9565(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+
+#define AR_SREV_9565_10(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+
 #define AR_SREV_9550(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
 
index 44a08eb53c62bc74baed99b25176d554571a5f58..a483d518758cfe4d57f1b38c8e8812b3ce3d8a74 100644 (file)
@@ -497,7 +497,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
 
        REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                /*
                 * this is needed to prevent the chip waking up
                 * the host within 3-4 seconds with certain
index 0d4155aec48d72196d5c64eee5c2517766760632..36618e3a5e609831184b973e30b3a9aa739522f3 100644 (file)
@@ -568,7 +568,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);
 
-                       if (ts->ts_status & ATH9K_TXERR_FILT)
+                       if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                                tid->ac->clear_ps_filter = true;
                }
        }
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
        TX_STAT_INC(txq->axq_qnum, queued);
 }
 
-static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
+static void setup_frame_info(struct ieee80211_hw *hw,
+                            struct ieee80211_sta *sta,
+                            struct sk_buff *skb,
                             int framelen)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        const struct ieee80211_rate *rate;
@@ -1819,10 +1820,14 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;
+
        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
            (curchan->channelFlags & CHANNEL_5GHZ) &&
            (chainmask == 0x7) && (rate < 0x90))
                return 0x3;
+       else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
+                IS_CCK_RATE(rate))
+               return 0x2;
        else
                return chainmask;
 }
@@ -1935,7 +1940,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
+       struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
        struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1984,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
            !ieee80211_is_data(hdr->frame_control))
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 
-       setup_frame_info(hw, skb, frmlen);
+       setup_frame_info(hw, sta, skb, frmlen);
 
        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
index 376be11161c0bdaaa310205523ebf3d61ba1bdcd..2aa4a59c72c87d4045de3f405d697e0067cf71da 100644 (file)
@@ -425,6 +425,7 @@ struct ar9170 {
        bool rx_has_plcp;
        struct sk_buff *rx_failover;
        int rx_failover_missing;
+       u32 ampdu_ref;
 
        /* FIFO for collecting outstanding BlockAckRequest */
        struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 
 /* TX */
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control,
+                   struct sk_buff *skb);
 void carl9170_tx_janitor(struct work_struct *work);
 void carl9170_tx_process_status(struct ar9170 *ar,
                                const struct carl9170_rsp *cmd);
index c5ca6f1f5836c26f1360fde607e02decaca340d0..24ac2876a7337ad2a015f69a4f273165d3d8d7f0 100644 (file)
@@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                if (SUPP(CARL9170FW_WLANTX_CAB)) {
                        if_comb_types |=
                                BIT(NL80211_IFTYPE_AP) |
+                               BIT(NL80211_IFTYPE_MESH_POINT) |
                                BIT(NL80211_IFTYPE_P2P_GO);
                }
        }
index 53415bfd8bef7e673749904369775520ff74beae..e3b1b6e87760ed0e70ca77dfcb23ae3e4b9df982 100644 (file)
@@ -304,7 +304,8 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
        struct ath_common *common = &ar->common;
        u8 *mac_addr, *bssid;
        u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
-       u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS;
+       u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
+               AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
        u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
                      AR9170_MAC_RX_CTRL_SHORT_FILTER;
        u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
@@ -318,10 +319,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
                bssid = common->curbssid;
 
                switch (vif->type) {
-               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_ADHOC:
                        cam_mode |= AR9170_MAC_CAM_IBSS;
                        break;
+               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_AP:
                        cam_mode |= AR9170_MAC_CAM_AP;
 
index 858e58dfc4dc67176f7d9693d7babb598ea6980e..67997b39aba79f0d14c47ffdbe4e85248bece20b 100644 (file)
@@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
 
                        goto unlock;
 
+               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_AP:
                        if ((vif->type == NL80211_IFTYPE_STATION) ||
                            (vif->type == NL80211_IFTYPE_WDS) ||
-                           (vif->type == NL80211_IFTYPE_AP))
+                           (vif->type == NL80211_IFTYPE_AP) ||
+                           (vif->type == NL80211_IFTYPE_MESH_POINT))
                                break;
 
                        err = -EBUSY;
@@ -1147,6 +1149,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                ktype = AR9170_ENC_ALG_AESCCMP;
+               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                break;
        default:
                return -EOPNOTSUPP;
@@ -1778,6 +1781,7 @@ void *carl9170_alloc(size_t priv_size)
        hw->wiphy->interface_modes = 0;
 
        hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
+                    IEEE80211_HW_MFP_CAPABLE |
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                     IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_PS_NULLFUNC_STACK |
index 6f6a34155667d0da84fdb5c9d63824ce7d1ac37b..a0b72307854799b81c36649d916da2940be90ad6 100644 (file)
@@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_MESH_POINT:
                        carl9170_update_beacon(ar, true);
                        break;
 
@@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
 #undef TID_CHECK
 }
 
-static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
+static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
+                                struct ieee80211_rx_status *rx_status)
 {
        __le16 fc;
 
@@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
                return true;
        }
 
+       rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+       rx_status->ampdu_reference = ar->ampdu_ref;
+
        /*
         * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
         * certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
        if (unlikely(len < sizeof(*mac)))
                goto drop;
 
+       memset(&status, 0, sizeof(status));
+
        mpdu_len = len - sizeof(*mac);
 
        mac = (void *)(buf + mpdu_len);
        mac_status = mac->status;
        switch (mac_status & AR9170_RX_STATUS_MPDU) {
        case AR9170_RX_STATUS_MPDU_FIRST:
+               ar->ampdu_ref++;
                /* Aggregated MPDUs start with an PLCP header */
                if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
                        head = (void *) buf;
@@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
                break;
 
        case AR9170_RX_STATUS_MPDU_LAST:
+               status.flag |= RX_FLAG_AMPDU_IS_LAST;
+
                /*
                 * The last frame of an A-MPDU has an extra tail
                 * which does contain the phy status of the whole
                 * aggregate.
                 */
-
                if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
                        mpdu_len -= sizeof(struct ar9170_rx_phystatus);
                        phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
        if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
                goto drop;
 
-       memset(&status, 0, sizeof(status));
        if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
                goto drop;
 
-       if (!carl9170_ampdu_check(ar, buf, mac_status))
+       if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
                goto drop;
 
        if (phy)
index 6a8681407a1de93373072d5b66dbb39238683804..84377cf580e06a29a69d4ca469ed5736f4aa9936 100644 (file)
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
        return false;
 }
 
-static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
+static int carl9170_tx_prepare(struct ar9170 *ar,
+                              struct ieee80211_sta *sta,
+                              struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct _carl9170_tx_superframe *txc;
        struct carl9170_vif_info *cvif;
        struct ieee80211_tx_info *info;
        struct ieee80211_tx_rate *txrate;
-       struct ieee80211_sta *sta;
        struct carl9170_tx_info *arinfo;
        unsigned int hw_queue;
        int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
        else
                cvif = NULL;
 
-       sta = info->control.sta;
-
        txc = (void *)skb_push(skb, sizeof(*txc));
        memset(txc, 0, sizeof(*txc));
 
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
        return false;
 }
 
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control,
+                   struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
-       struct ieee80211_sta *sta;
+       struct ieee80211_sta *sta = control->sta;
        bool run;
 
        if (unlikely(!IS_STARTED(ar)))
                goto err_free;
 
        info = IEEE80211_SKB_CB(skb);
-       sta = info->control.sta;
 
-       if (unlikely(carl9170_tx_prepare(ar, skb)))
+       if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
                goto err_free;
 
        carl9170_tx_accounting(ar, skb);
index 4648bbf76abcb617d97d5a40aa4a4255b1228c74..098fe9ee7096958a73093c2cbe18d7b37216e556 100644 (file)
@@ -4,6 +4,7 @@ b43-y                           += tables.o
 b43-$(CONFIG_B43_PHY_N)                += tables_nphy.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2055.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2056.o
+b43-$(CONFIG_B43_PHY_N)                += radio_2057.o
 b43-y                          += phy_common.o
 b43-y                          += phy_g.o
 b43-y                          += phy_a.o
index 7c899fc7ddd0ea1bced38d801b4eb18d0e1341f6..b298e5d68be2f0a58cf02d45d2ccd9a1e1e464bd 100644 (file)
@@ -241,16 +241,18 @@ enum {
 #define B43_SHM_SH_PHYVER              0x0050  /* PHY version */
 #define B43_SHM_SH_PHYTYPE             0x0052  /* PHY type */
 #define B43_SHM_SH_ANTSWAP             0x005C  /* Antenna swap threshold */
-#define B43_SHM_SH_HOSTFLO             0x005E  /* Hostflags for ucode options (low) */
-#define B43_SHM_SH_HOSTFMI             0x0060  /* Hostflags for ucode options (middle) */
-#define B43_SHM_SH_HOSTFHI             0x0062  /* Hostflags for ucode options (high) */
+#define B43_SHM_SH_HOSTF1              0x005E  /* Hostflags 1 for ucode options */
+#define B43_SHM_SH_HOSTF2              0x0060  /* Hostflags 2 for ucode options */
+#define B43_SHM_SH_HOSTF3              0x0062  /* Hostflags 3 for ucode options */
 #define B43_SHM_SH_RFATT               0x0064  /* Current radio attenuation value */
 #define B43_SHM_SH_RADAR               0x0066  /* Radar register */
 #define B43_SHM_SH_PHYTXNOI            0x006E  /* PHY noise directly after TX (lower 8bit only) */
 #define B43_SHM_SH_RFRXSP1             0x0072  /* RF RX SP Register 1 */
+#define B43_SHM_SH_HOSTF4              0x0078  /* Hostflags 4 for ucode options */
 #define B43_SHM_SH_CHAN                        0x00A0  /* Current channel (low 8bit only) */
 #define  B43_SHM_SH_CHAN_5GHZ          0x0100  /* Bit set, if 5 Ghz channel */
 #define  B43_SHM_SH_CHAN_40MHZ         0x0200  /* Bit set, if 40 Mhz channel width */
+#define B43_SHM_SH_HOSTF5              0x00D4  /* Hostflags 5 for ucode options */
 #define B43_SHM_SH_BCMCFIFOID          0x0108  /* Last posted cookie to the bcast/mcast FIFO */
 /* TSSI information */
 #define B43_SHM_SH_TSSI_CCK            0x0058  /* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@ enum {
 #define B43_PHYTYPE_HT                 0x07
 #define B43_PHYTYPE_LCN                        0x08
 #define B43_PHYTYPE_LCNXN              0x09
+#define B43_PHYTYPE_LCN40              0x0a
+#define B43_PHYTYPE_AC                 0x0b
 
 /* PHYRegisters */
 #define B43_PHY_ILT_A_CTRL             0x0072
index a140165dfee0515b70263cddf6e7902f4171de48..73730e94e0ac79fdbdf257f1cf969179e1c62a46 100644 (file)
@@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev)
 {
        u64 ret;
 
-       ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI);
+       ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
        ret <<= 16;
-       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI);
+       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
        ret <<= 16;
-       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO);
+       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
 
        return ret;
 }
@@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
        lo = (value & 0x00000000FFFFULL);
        mi = (value & 0x0000FFFF0000ULL) >> 16;
        hi = (value & 0xFFFF00000000ULL) >> 32;
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo);
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi);
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
 }
 
 /* Read the firmware capabilities bitmask (Opensource firmware only) */
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
 }
 
 static void b43_op_tx(struct ieee80211_hw *hw,
-                    struct sk_buff *skb)
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
 
@@ -4282,6 +4283,35 @@ out:
        return err;
 }
 
+static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
+{
+       switch (phy_type) {
+       case B43_PHYTYPE_A:
+               return "A";
+       case B43_PHYTYPE_B:
+               return "B";
+       case B43_PHYTYPE_G:
+               return "G";
+       case B43_PHYTYPE_N:
+               return "N";
+       case B43_PHYTYPE_LP:
+               return "LP";
+       case B43_PHYTYPE_SSLPN:
+               return "SSLPN";
+       case B43_PHYTYPE_HT:
+               return "HT";
+       case B43_PHYTYPE_LCN:
+               return "LCN";
+       case B43_PHYTYPE_LCNXN:
+               return "LCNXN";
+       case B43_PHYTYPE_LCN40:
+               return "LCN40";
+       case B43_PHYTYPE_AC:
+               return "AC";
+       }
+       return "UNKNOWN";
+}
+
 /* Get PHY and RADIO versioning numbers */
 static int b43_phy_versioning(struct b43_wldev *dev)
 {
@@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev)
                unsupported = 1;
        }
        if (unsupported) {
-               b43err(dev->wl, "FOUND UNSUPPORTED PHY "
-                      "(Analog %u, Type %u, Revision %u)\n",
-                      analog_type, phy_type, phy_rev);
+               b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
+                      analog_type, phy_type, b43_phy_name(dev, phy_type),
+                      phy_rev);
                return -EOPNOTSUPP;
        }
-       b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n",
-              analog_type, phy_type, phy_rev);
+       b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
+               analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
 
        /* Get RADIO versioning */
        if (dev->dev->core_rev >= 24) {
index 3f8883b14d9cc98ca334890541320aa50836d36e..f01676ac481b25071e9f7aae9ad790dbd5836ff0 100644 (file)
@@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
                          (b43_radio_read16(dev, offset) & mask) | set);
 }
 
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+                         u16 value, int delay, int timeout)
+{
+       u16 val;
+       int i;
+
+       for (i = 0; i < timeout; i += delay) {
+               val = b43_radio_read(dev, offset);
+               if ((val & mask) == value)
+                       return true;
+               udelay(delay);
+       }
+       return false;
+}
+
 u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
 {
        assert_mac_suspended(dev);
@@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
        average = (a + b + c + d + 2) / 4;
        if (is_ofdm) {
                /* Adjust for CCK-boost */
-               if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
+               if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
                    & B43_HF_CCKBOOST)
                        average = (average >= 13) ? (average - 13) : 0;
        }
index 9233b13fc16d8a205eb474a3870f59bc3b6b7e8c..f1b999349876bbfc8cad799858434f5e64a14b37 100644 (file)
@@ -364,6 +364,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
  */
 void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
 
+/**
+ * b43_radio_wait_value - Waits for a given value in masked register read
+ */
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+                         u16 value, int delay, int timeout);
+
 /**
  * b43_radio_lock - Lock firmware radio register access
  */
index b92bb9c92ad1bb3a228f9e0fbf806da9686c6a5f..3c35382ee6c23ebfbaed8d1dcfdf33e804faec96 100644 (file)
@@ -32,6 +32,7 @@
 #include "tables_nphy.h"
 #include "radio_2055.h"
 #include "radio_2056.h"
+#include "radio_2057.h"
 #include "main.h"
 
 struct nphy_txgains {
@@ -126,6 +127,46 @@ ok:
        b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
+static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
+                                             u16 value, u8 core, bool off,
+                                             u8 override)
+{
+       const struct nphy_rf_control_override_rev7 *e;
+       u16 en_addrs[3][2] = {
+               { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
+       };
+       u16 en_addr;
+       u16 en_mask = field;
+       u16 val_addr;
+       u8 i;
+
+       /* Remember: we can get NULL! */
+       e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
+
+       for (i = 0; i < 2; i++) {
+               if (override >= ARRAY_SIZE(en_addrs)) {
+                       b43err(dev->wl, "Invalid override value %d\n", override);
+                       return;
+               }
+               en_addr = en_addrs[override][i];
+
+               val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+
+               if (off) {
+                       b43_phy_mask(dev, en_addr, ~en_mask);
+                       if (e) /* Do it safer, better than wl */
+                               b43_phy_mask(dev, val_addr, ~e->val_mask);
+               } else {
+                       if (!core || (core & (1 << i))) {
+                               b43_phy_set(dev, en_addr, en_mask);
+                               if (e)
+                                       b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
+                       }
+               }
+       }
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
 static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
                                                u16 value, u8 core, bool off)
@@ -458,6 +499,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
                b43_nphy_stay_in_carrier_search(dev, false);
 }
 
+/**************************************************
+ * Radio 0x2057
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
+static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 tmp;
+
+       if (phy->radio_rev == 5) {
+               b43_phy_mask(dev, 0x342, ~0x2);
+               udelay(10);
+               b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
+               b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
+       }
+
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
+       udelay(10);
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
+               b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+               return 0;
+       }
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
+       tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
+
+       if (phy->radio_rev == 5) {
+               b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
+               b43_radio_mask(dev, 0x1ca, ~0x2);
+       }
+       if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+               b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
+               b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
+                                 tmp << 2);
+       }
+
+       return tmp & 0x3e;
+}
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
+static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+                       phy->radio_rev == 6);
+       u16 tmp;
+
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x61);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
+       }
+       b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000))
+               b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x69);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
+       }
+       b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000))
+               b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
+               b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x73);
+               b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
+       }
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000)) {
+               b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+               return 0;
+       }
+       tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       return tmp;
+}
+
+static void b43_radio_2057_init_pre(struct b43_wldev *dev)
+{
+       b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+       /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+       b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
+       b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+       b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
+}
+
+static void b43_radio_2057_init_post(struct b43_wldev *dev)
+{
+       b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
+
+       b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
+       b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
+       mdelay(2);
+       b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
+       b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
+
+       if (dev->phy.n->init_por) {
+               b43_radio_2057_rcal(dev);
+               b43_radio_2057_rccal(dev);
+       }
+       b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
+
+       dev->phy.n->init_por = false;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
+static void b43_radio_2057_init(struct b43_wldev *dev)
+{
+       b43_radio_2057_init_pre(dev);
+       r2057_upload_inittabs(dev);
+       b43_radio_2057_init_post(dev);
+}
+
 /**************************************************
  * Radio 0x2056
  **************************************************/
@@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        enum ieee80211_band band = b43_current_band(dev->wl);
        u16 offset;
        u8 i;
-       u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+       u16 bias, cbias;
+       u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
+       u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
 
        B43_WARN_ON(dev->phy.rev < 3);
 
@@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
                }
        } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
-               /* TODO */
+               u16 freq = dev->phy.channel_freq;
+               if (freq < 5100) {
+                       paa_boost = 0xA;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xF;
+                       mixa_boost = 0xF;
+               } else if (freq < 5340) {
+                       paa_boost = 0x8;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xFB;
+                       mixa_boost = 0xF;
+               } else if (freq < 5650) {
+                       paa_boost = 0x0;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xB;
+                       mixa_boost = 0xF;
+               } else {
+                       paa_boost = 0x0;
+                       pada_boost = 0x77;
+                       if (freq != 5825)
+                               pgaa_boost = -(freq - 18) / 36 + 168;
+                       else
+                               pgaa_boost = 6;
+                       mixa_boost = 0xF;
+               }
+
+               for (i = 0; i < 2; i++) {
+                       offset = i ? B2056_TX1 : B2056_TX0;
+
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_TXSPARE1, 0x30);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PA_SPARE2, 0xee);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PADA_CASCBIAS, 0x03);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+               }
        }
 
        udelay(50);
@@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        udelay(300);
 }
 
+static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 mast2, tmp;
+
+       if (phy->rev != 3)
+               return 0;
+
+       mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
+       b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
+
+       udelay(10);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+       udelay(10);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
+
+       if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
+                                 1000000)) {
+               b43err(dev->wl, "Radio recalibration timeout\n");
+               return 0;
+       }
+
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+       tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
+
+       b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
+
+       return tmp & 0x1f;
+}
+
 static void b43_radio_init2056_pre(struct b43_wldev *dev)
 {
        b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
        b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
        b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
        b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
-       /*
-       if (nphy->init_por)
-               Call Radio 2056 Recalibrate
-       */
+       if (dev->phy.n->init_por)
+               b43_radio_2056_rcal(dev);
 }
 
 /*
@@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev)
        b43_radio_init2056_pre(dev);
        b2056_upload_inittabs(dev, 0, 0);
        b43_radio_init2056_post(dev);
+
+       dev->phy.n->init_por = false;
 }
 
 /**************************************************
@@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
-       int i;
-       u16 val;
        bool workaround = false;
 
        if (sprom->revision < 4)
@@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
        b43_radio_set(dev, B2055_CAL_MISC, 0x1);
        msleep(1);
        b43_radio_set(dev, B2055_CAL_MISC, 0x40);
-       for (i = 0; i < 200; i++) {
-               val = b43_radio_read(dev, B2055_CAL_COUT2);
-               if (val & 0x80) {
-                       i = 0;
-                       break;
-               }
-               udelay(10);
-       }
-       if (i)
+       if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
                b43err(dev->wl, "radio post init timeout\n");
        b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
        b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 {
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 7)
+               ; /* TODO */
+       else if (dev->phy.rev >= 3)
                b43_nphy_gain_ctl_workarounds_rev3plus(dev);
        else
                b43_nphy_gain_ctl_workarounds_rev1_2(dev);
 }
 
+/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
+static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
+{
+       if (!offset)
+               offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
+       return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
+}
+
+static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
+{
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
+       struct b43_phy *phy = &dev->phy;
+
+       u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+                                       0x1F };
+       u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+
+       u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
+       u8 ntab7_138_146[] = { 0x11, 0x11 };
+       u8 ntab7_133[] = { 0x77, 0x11, 0x11 };
+
+       u16 lpf_20, lpf_40, lpf_11b;
+       u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
+       u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
+       bool rccal_ovrd = false;
+
+       u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
+       u16 bias, conv, filt;
+
+       u32 tmp32;
+       u8 core;
+
+       if (phy->rev == 7) {
+               b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
+       }
+       if (phy->rev <= 8) {
+               b43_phy_write(dev, 0x23F, 0x1B0);
+               b43_phy_write(dev, 0x240, 0x1B0);
+       }
+       if (phy->rev >= 8)
+               b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
+
+       b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
+       b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
+       tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+       tmp32 &= 0xffffff;
+       b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);
+
+       if (b43_nphy_ipa(dev))
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+                               rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+
+       b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
+       b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);
+
+       lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
+       lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
+       lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
+       if (b43_nphy_ipa(dev)) {
+               if ((phy->radio_rev == 5 && phy->is_40mhz) ||
+                   phy->radio_rev == 7 || phy->radio_rev == 8) {
+                       bcap_val = b43_radio_read(dev, 0x16b);
+                       scap_val = b43_radio_read(dev, 0x16a);
+                       scap_val_11b = scap_val;
+                       bcap_val_11b = bcap_val;
+                       if (phy->radio_rev == 5 && phy->is_40mhz) {
+                               scap_val_11n_20 = scap_val;
+                               bcap_val_11n_20 = bcap_val;
+                               scap_val_11n_40 = bcap_val_11n_40 = 0xc;
+                               rccal_ovrd = true;
+                       } else { /* Rev 7/8 */
+                               lpf_20 = 4;
+                               lpf_11b = 1;
+                               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                                       scap_val_11n_20 = 0xc;
+                                       bcap_val_11n_20 = 0xc;
+                                       scap_val_11n_40 = 0xa;
+                                       bcap_val_11n_40 = 0xa;
+                               } else {
+                                       scap_val_11n_20 = 0x14;
+                                       bcap_val_11n_20 = 0x14;
+                                       scap_val_11n_40 = 0xf;
+                                       bcap_val_11n_40 = 0xf;
+                               }
+                               rccal_ovrd = true;
+                       }
+               }
+       } else {
+               if (phy->radio_rev == 5) {
+                       lpf_20 = 1;
+                       lpf_40 = 3;
+                       bcap_val = b43_radio_read(dev, 0x16b);
+                       scap_val = b43_radio_read(dev, 0x16a);
+                       scap_val_11b = scap_val;
+                       bcap_val_11b = bcap_val;
+                       scap_val_11n_20 = 0x11;
+                       scap_val_11n_40 = 0x11;
+                       bcap_val_11n_20 = 0x13;
+                       bcap_val_11n_40 = 0x13;
+                       rccal_ovrd = true;
+               }
+       }
+       if (rccal_ovrd) {
+               rx2tx_lut_20_11b = (bcap_val_11b << 8) |
+                                  (scap_val_11b << 3) |
+                                  lpf_11b;
+               rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
+                                  (scap_val_11n_20 << 3) |
+                                  lpf_20;
+               rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
+                                  (scap_val_11n_40 << 3) |
+                                  lpf_40;
+               for (core = 0; core < 2; core++) {
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
+                                      rx2tx_lut_20_11b);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
+                                      rx2tx_lut_20_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
+                                      rx2tx_lut_20_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
+                                      rx2tx_lut_40_11n);
+               }
+               b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
+       }
+       b43_phy_write(dev, 0x32F, 0x3);
+       if (phy->radio_rev == 4 || phy->radio_rev == 6)
+               b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);
+
+       if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
+               if (sprom->revision &&
+                   sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
+                       b43_radio_write(dev, 0x5, 0x05);
+                       b43_radio_write(dev, 0x6, 0x30);
+                       b43_radio_write(dev, 0x7, 0x00);
+                       b43_radio_set(dev, 0x4f, 0x1);
+                       b43_radio_set(dev, 0xd4, 0x1);
+                       bias = 0x1f;
+                       conv = 0x6f;
+                       filt = 0xaa;
+               } else {
+                       bias = 0x2b;
+                       conv = 0x7f;
+                       filt = 0xee;
+               }
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       for (core = 0; core < 2; core++) {
+                               if (core == 0) {
+                                       b43_radio_write(dev, 0x5F, bias);
+                                       b43_radio_write(dev, 0x64, conv);
+                                       b43_radio_write(dev, 0x66, filt);
+                               } else {
+                                       b43_radio_write(dev, 0xE8, bias);
+                                       b43_radio_write(dev, 0xE9, conv);
+                                       b43_radio_write(dev, 0xEB, filt);
+                               }
+                       }
+               }
+       }
+
+       if (b43_nphy_ipa(dev)) {
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+                           phy->radio_rev == 6) {
+                               for (core = 0; core < 2; core++) {
+                                       if (core == 0)
+                                               b43_radio_write(dev, 0x51,
+                                                               0x7f);
+                                       else
+                                               b43_radio_write(dev, 0xd6,
+                                                               0x7f);
+                               }
+                       }
+                       if (phy->radio_rev == 3) {
+                               for (core = 0; core < 2; core++) {
+                                       if (core == 0) {
+                                               b43_radio_write(dev, 0x64,
+                                                               0x13);
+                                               b43_radio_write(dev, 0x5F,
+                                                               0x1F);
+                                               b43_radio_write(dev, 0x66,
+                                                               0xEE);
+                                               b43_radio_write(dev, 0x59,
+                                                               0x8A);
+                                               b43_radio_write(dev, 0x80,
+                                                               0x3E);
+                                       } else {
+                                               b43_radio_write(dev, 0x69,
+                                                               0x13);
+                                               b43_radio_write(dev, 0xE8,
+                                                               0x1F);
+                                               b43_radio_write(dev, 0xEB,
+                                                               0xEE);
+                                               b43_radio_write(dev, 0xDE,
+                                                               0x8A);
+                                               b43_radio_write(dev, 0x105,
+                                                               0x3E);
+                                       }
+                               }
+                       } else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
+                               if (!phy->is_40mhz) {
+                                       b43_radio_write(dev, 0x5F, 0x14);
+                                       b43_radio_write(dev, 0xE8, 0x12);
+                               } else {
+                                       b43_radio_write(dev, 0x5F, 0x16);
+                                       b43_radio_write(dev, 0xE8, 0x16);
+                               }
+                       }
+               } else {
+                       u16 freq = phy->channel_freq;
+                       if ((freq >= 5180 && freq <= 5230) ||
+                           (freq >= 5745 && freq <= 5805)) {
+                               b43_radio_write(dev, 0x7D, 0xFF);
+                               b43_radio_write(dev, 0xFE, 0xFF);
+                       }
+               }
+       } else {
+               if (phy->radio_rev != 5) {
+                       for (core = 0; core < 2; core++) {
+                               if (core == 0) {
+                                       b43_radio_write(dev, 0x5c, 0x61);
+                                       b43_radio_write(dev, 0x51, 0x70);
+                               } else {
+                                       b43_radio_write(dev, 0xe1, 0x61);
+                                       b43_radio_write(dev, 0xd6, 0x70);
+                               }
+                       }
+               }
+       }
+
+       if (phy->radio_rev == 4) {
+               b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+               for (core = 0; core < 2; core++) {
+                       if (core == 0) {
+                               b43_radio_write(dev, 0x1a1, 0x00);
+                               b43_radio_write(dev, 0x1a2, 0x3f);
+                               b43_radio_write(dev, 0x1a6, 0x3f);
+                       } else {
+                               b43_radio_write(dev, 0x1a7, 0x00);
+                               b43_radio_write(dev, 0x1ab, 0x3f);
+                               b43_radio_write(dev, 0x1ac, 0x3f);
+                       }
+               }
+       } else {
+               b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);
+
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
+       }
+
+       b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);
+
+       b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
+
+       if (!phy->is_40mhz) {
+               b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
+       } else {
+               b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
+               b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
+       }
+
+       b43_nphy_gain_ctl_workarounds(dev);
+
+       /* TODO
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
+                           aux_adc_vmid_rev7_core0);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
+                           aux_adc_vmid_rev7_core1);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
+                           aux_adc_gain_rev7);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
+                           aux_adc_gain_rev7);
+       */
+}
+
 static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                        rx2tx_delays[6] = 1;
                        rx2tx_events[7] = 0x1F;
                }
-               b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
                                         ARRAY_SIZE(rx2tx_events));
        }
 
@@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 
        b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
 
-       b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
-       b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+       if (!dev->phy.is_40mhz) {
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+       } else {
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
+       }
 
        b43_nphy_gain_ctl_workarounds(dev);
 
@@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
 
        if (dev->phy.rev == 4 &&
-               b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+           b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
                                0x70);
                b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
                                0x70);
        }
 
+       /* Dropped probably-always-true condition */
        b43_phy_write(dev, 0x224, 0x03eb);
        b43_phy_write(dev, 0x225, 0x03eb);
        b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_phy_write(dev, 0x22d, 0x042b);
        b43_phy_write(dev, 0x22e, 0x0381);
        b43_phy_write(dev, 0x22f, 0x0381);
+
+       if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+               ; /* TODO: 0x0080000000000000 HF */
 }
 
 static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
        u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
 
+       if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
+           dev->dev->board_type == 0x8B) {
+               delays1[0] = 0x1;
+               delays1[5] = 0x14;
+       }
+
        if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
            nphy->band5g_pwrgain) {
                b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
 
        b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
        b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
-       b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
-       b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+       if (dev->phy.rev < 3) {
+               b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+       }
 
        if (dev->phy.rev < 2) {
                b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
        b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
 
-       if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
-           dev->dev->board_type == 0x8B) {
-               delays1[0] = 0x1;
-               delays1[5] = 0x14;
-       }
        b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
        b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
 
@@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
        b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
 
-       b43_phy_mask(dev, B43_NPHY_PIL_DW1,
-                       ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+       if (dev->phy.rev < 3) {
+               b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+                            ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+       }
 
        if (dev->phy.rev == 2)
                b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
        b43_phy_set(dev, B43_NPHY_IQFLIP,
                    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
 
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 7)
+               b43_nphy_workarounds_rev7plus(dev);
+       else if (dev->phy.rev >= 3)
                b43_nphy_workarounds_rev3plus(dev);
        else
                b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
                b43_nphy_ipa_internal_tssi_setup(dev);
 
        if (phy->rev >= 7)
-               ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+               b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
        else if (phy->rev >= 3)
                b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
 
@@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
        b43_nphy_rssi_select(dev, 0, 0);
 
        if (phy->rev >= 7)
-               ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+               b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
        else if (phy->rev >= 3)
                b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
 
@@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
        nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
        nphy->spur_avoid = (phy->rev >= 3) ?
                                B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
+       nphy->init_por = true;
        nphy->gain_boost = true; /* this way we follow wl, assume it is true */
        nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
        nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
                nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
                nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
        }
+
+       nphy->init_por = true;
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
        if (blocked) {
                b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
                                ~B43_NPHY_RFCTL_CMD_CHIP0PU);
-               if (dev->phy.rev >= 3) {
+               if (dev->phy.rev >= 7) {
+                       /* TODO */
+               } else if (dev->phy.rev >= 3) {
                        b43_radio_mask(dev, 0x09, ~0x2);
 
                        b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
                        b43_radio_write(dev, 0x3064, 0);
                }
        } else {
-               if (dev->phy.rev >= 3) {
+               if (dev->phy.rev >= 7) {
+                       b43_radio_2057_init(dev);
+                       b43_switch_channel(dev, dev->phy.channel);
+               } else if (dev->phy.rev >= 3) {
                        b43_radio_init2056(dev);
                        b43_switch_channel(dev, dev->phy.channel);
                } else {
index fd12b386fea1cc5a1c0796589b650d12e6917fcf..092c0140c2490d777056399db075cfcbb24a2855 100644 (file)
@@ -785,6 +785,7 @@ struct b43_phy_n {
        u16 papd_epsilon_offset[2];
        s32 preamble_override;
        u32 bb_mult_save;
+       bool init_por;
 
        bool gain_boost;
        bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644 (file)
index 0000000..d61d683
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11n 2057 radio device data tables
+
+  Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "radio_2057.h"
+#include "phy_common.h"
+
+static u16 r2057_rev4_init[42][2] = { /* { reg, value } pairs; NOTE(review): could be const if consumers took const u16 * */
+       { 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
+       { 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
+       { 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
+       { 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
+       { 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
+       { 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
+       { 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+       { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+       { 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+       { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+       { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+};
+
+static u16 r2057_rev5_init[44][2] = { /* { reg, value } pairs */
+       { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+       { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+       { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+       { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+       { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+       { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+       { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+       { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
+       { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
+       { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev5a_init[45][2] = { /* { reg, value } pairs */
+       { 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+       { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+       { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+       { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+       { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+       { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
+       { 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
+       { 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+       { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+       { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
+       { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev7_init[54][2] = { /* { reg, value } pairs */
+       { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+       { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+       { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
+       { 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
+       { 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
+       { 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+       { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
+       { 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
+       { 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
+       { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+       { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+       { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+       { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+       { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+static u16 r2057_rev8_init[54][2] = { /* { reg, value } pairs */
+       { 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+       { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+       { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
+       { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
+       { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+       { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
+       { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
+       { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
+       { 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+       { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+       { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+       { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+       { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+void r2057_upload_inittabs(struct b43_wldev *dev) /* pick & dump the 2057 radio init table for this PHY/radio rev */
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 *table = NULL;
+       u16 size = 0, i; /* size = number of { reg, value } rows; 0 if no table matches */
+
+       if (phy->rev == 7) {
+               table = r2057_rev4_init[0];
+               size = ARRAY_SIZE(r2057_rev4_init);
+       } else if (phy->rev == 8 || phy->rev == 9) {
+               if (phy->radio_rev == 5) {
+                       if (phy->rev == 8) { /* PHY rev 8 -> rev5 table, rev 9 -> rev5a (was radio_rev, dead branch) */
+                               table = r2057_rev5_init[0];
+                               size = ARRAY_SIZE(r2057_rev5_init);
+                       } else {
+                               table = r2057_rev5a_init[0];
+                               size = ARRAY_SIZE(r2057_rev5a_init);
+                       }
+               } else if (phy->radio_rev == 7) {
+                       table = r2057_rev7_init[0];
+                       size = ARRAY_SIZE(r2057_rev7_init);
+               } else if (phy->radio_rev == 9) {
+                       table = r2057_rev8_init[0];
+                       size = ARRAY_SIZE(r2057_rev8_init);
+               }
+       }
+
+       if (table) {
+               for (i = 0; i < size; i++) { /* walk all rows, not a hard-coded 10 */
+                       pr_info("radio_write 0x%X ", *table);
+                       table++;
+                       pr_info("0x%X\n", *table);
+                       table++;
+               }
+       }
+}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644 (file)
index 0000000..eeebd8f
--- /dev/null
@@ -0,0 +1,430 @@
+#ifndef B43_RADIO_2057_H_
+#define B43_RADIO_2057_H_
+
+#include <linux/types.h>
+
+#include "tables_nphy.h"
+
+#define R2057_DACBUF_VINCM_CORE0               0x000
+#define R2057_IDCODE                           0x001
+#define R2057_RCCAL_MASTER                     0x002
+#define R2057_RCCAL_CAP_SIZE                   0x003
+#define R2057_RCAL_CONFIG                      0x004
+#define R2057_GPAIO_CONFIG                     0x005
+#define R2057_GPAIO_SEL1                       0x006
+#define R2057_GPAIO_SEL0                       0x007
+#define R2057_CLPO_CONFIG                      0x008
+#define R2057_BANDGAP_CONFIG                   0x009
+#define R2057_BANDGAP_RCAL_TRIM                        0x00a
+#define R2057_AFEREG_CONFIG                    0x00b
+#define R2057_TEMPSENSE_CONFIG                 0x00c
+#define R2057_XTAL_CONFIG1                     0x00d
+#define R2057_XTAL_ICORE_SIZE                  0x00e
+#define R2057_XTAL_BUF_SIZE                    0x00f
+#define R2057_XTAL_PULLCAP_SIZE                        0x010
+#define R2057_RFPLL_MASTER                     0x011
+#define R2057_VCOMONITOR_VTH_L                 0x012
+#define R2057_VCOMONITOR_VTH_H                 0x013
+#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT   0x014
+#define R2057_VCO_VARCSIZE_IDAC                        0x015
+#define R2057_VCOCAL_COUNTVAL0                 0x016
+#define R2057_VCOCAL_COUNTVAL1                 0x017
+#define R2057_VCOCAL_INTCLK_COUNT              0x018
+#define R2057_VCOCAL_MASTER                    0x019
+#define R2057_VCOCAL_NUMCAPCHANGE              0x01a
+#define R2057_VCOCAL_WINSIZE                   0x01b
+#define R2057_VCOCAL_DELAY_AFTER_REFRESH       0x01c
+#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP     0x01d
+#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP      0x01e
+#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP     0x01f
+#define R2057_VCO_FORCECAPEN_FORCECAP1         0x020
+#define R2057_VCO_FORCECAP0                    0x021
+#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE    0x022
+#define R2057_RFPLL_PFD_RESET_PW               0x023
+#define R2057_RFPLL_LOOPFILTER_R2              0x024
+#define R2057_RFPLL_LOOPFILTER_R1              0x025
+#define R2057_RFPLL_LOOPFILTER_C3              0x026
+#define R2057_RFPLL_LOOPFILTER_C2              0x027
+#define R2057_RFPLL_LOOPFILTER_C1              0x028
+#define R2057_CP_KPD_IDAC                      0x029
+#define R2057_RFPLL_IDACS                      0x02a
+#define R2057_RFPLL_MISC_EN                    0x02b
+#define R2057_RFPLL_MMD0                       0x02c
+#define R2057_RFPLL_MMD1                       0x02d
+#define R2057_RFPLL_MISC_CAL_RESETN            0x02e
+#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES     0x02f
+#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE                0x030
+#define R2057_VCOCAL_READCAP0                  0x031
+#define R2057_VCOCAL_READCAP1                  0x032
+#define R2057_VCOCAL_STATUS                    0x033
+#define R2057_LOGEN_PUS                                0x034
+#define R2057_LOGEN_PTAT_RESETS                        0x035
+#define R2057_VCOBUF_IDACS                     0x036
+#define R2057_VCOBUF_TUNE                      0x037
+#define R2057_CMOSBUF_TX2GQ_IDACS              0x038
+#define R2057_CMOSBUF_TX2GI_IDACS              0x039
+#define R2057_CMOSBUF_TX5GQ_IDACS              0x03a
+#define R2057_CMOSBUF_TX5GI_IDACS              0x03b
+#define R2057_CMOSBUF_RX2GQ_IDACS              0x03c
+#define R2057_CMOSBUF_RX2GI_IDACS              0x03d
+#define R2057_CMOSBUF_RX5GQ_IDACS              0x03e
+#define R2057_CMOSBUF_RX5GI_IDACS              0x03f
+#define R2057_LOGEN_MX2G_IDACS                 0x040
+#define R2057_LOGEN_MX2G_TUNE                  0x041
+#define R2057_LOGEN_MX5G_IDACS                 0x042
+#define R2057_LOGEN_MX5G_TUNE                  0x043
+#define R2057_LOGEN_MX5G_RCCR                  0x044
+#define R2057_LOGEN_INDBUF2G_IDAC              0x045
+#define R2057_LOGEN_INDBUF2G_IBOOST            0x046
+#define R2057_LOGEN_INDBUF2G_TUNE              0x047
+#define R2057_LOGEN_INDBUF5G_IDAC              0x048
+#define R2057_LOGEN_INDBUF5G_IBOOST            0x049
+#define R2057_LOGEN_INDBUF5G_TUNE              0x04a
+#define R2057_CMOSBUF_TX_RCCR                  0x04b
+#define R2057_CMOSBUF_RX_RCCR                  0x04c
+#define R2057_LOGEN_SEL_PKDET                  0x04d
+#define R2057_CMOSBUF_SHAREIQ_PTAT             0x04e
+#define R2057_RXTXBIAS_CONFIG_CORE0            0x04f
+#define R2057_TXGM_TXRF_PUS_CORE0              0x050
+#define R2057_TXGM_IDAC_BLEED_CORE0            0x051
+#define R2057_TXGM_GAIN_CORE0                  0x056
+#define R2057_TXGM2G_PKDET_PUS_CORE0           0x057
+#define R2057_PAD2G_PTATS_CORE0                        0x058
+#define R2057_PAD2G_IDACS_CORE0                        0x059
+#define R2057_PAD2G_BOOST_PU_CORE0             0x05a
+#define R2057_PAD2G_CASCV_GAIN_CORE0           0x05b
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0      0x05c
+#define R2057_TXMIX2G_LODC_CORE0               0x05d
+#define R2057_PAD2G_TUNE_PUS_CORE0             0x05e
+#define R2057_IPA2G_GAIN_CORE0                 0x05f
+#define R2057_TSSI2G_SPARE1_CORE0              0x060
+#define R2057_TSSI2G_SPARE2_CORE0              0x061
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0     0x062
+#define R2057_IPA2G_IMAIN_CORE0                        0x063
+#define R2057_IPA2G_CASCONV_CORE0              0x064
+#define R2057_IPA2G_CASCOFFV_CORE0             0x065
+#define R2057_IPA2G_BIAS_FILTER_CORE0          0x066
+#define R2057_TX5G_PKDET_CORE0                 0x069
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE0         0x06a
+#define R2057_PAD5G_PTATS1_CORE0               0x06b
+#define R2057_PAD5G_CLASS_PTATS2_CORE0         0x06c
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0                0x06d
+#define R2057_PAD5G_CASCV_IMAIN_CORE0          0x06e
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0    0x06f
+#define R2057_PGA_BOOST_TUNE_CORE0             0x070
+#define R2057_PGA_GAIN_CORE0                   0x071
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0    0x072
+#define R2057_TXMIX5G_BOOST_TUNE_CORE0         0x073
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE0                0x074
+#define R2057_IPA5G_IAUX_CORE0                 0x075
+#define R2057_IPA5G_GAIN_CORE0                 0x076
+#define R2057_TSSI5G_SPARE1_CORE0              0x077
+#define R2057_TSSI5G_SPARE2_CORE0              0x078
+#define R2057_IPA5G_CASCOFFV_PU_CORE0          0x079
+#define R2057_IPA5G_PTAT_CORE0                 0x07a
+#define R2057_IPA5G_IMAIN_CORE0                        0x07b
+#define R2057_IPA5G_CASCONV_CORE0              0x07c
+#define R2057_IPA5G_BIAS_FILTER_CORE0          0x07d
+#define R2057_PAD_BIAS_FILTER_BWS_CORE0                0x080
+#define R2057_TR2G_CONFIG1_CORE0_NU            0x081
+#define R2057_TR2G_CONFIG2_CORE0_NU            0x082
+#define R2057_LNA5G_RFEN_CORE0                 0x083
+#define R2057_TR5G_CONFIG2_CORE0_NU            0x084
+#define R2057_RXRFBIAS_IBOOST_PU_CORE0         0x085
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0        0x086
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0     0x087
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0      0x088
+#define R2057_RXMIX_CMFBITAIL_PU_CORE0         0x089
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE0         0x08a
+#define R2057_LNA2_IAUX_PTAT_CORE0             0x08b
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE0         0x08c
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0    0x08d
+#define R2057_RXRFBIAS_BANDSEL_CORE0           0x08e
+#define R2057_TIA_CONFIG_CORE0                 0x08f
+#define R2057_TIA_IQGAIN_CORE0                 0x090
+#define R2057_TIA_IBIAS2_CORE0                 0x091
+#define R2057_TIA_IBIAS1_CORE0                 0x092
+#define R2057_TIA_SPARE_Q_CORE0                        0x093
+#define R2057_TIA_SPARE_I_CORE0                        0x094
+#define R2057_RXMIX2G_PUS_CORE0                        0x095
+#define R2057_RXMIX2G_VCMREFS_CORE0            0x096
+#define R2057_RXMIX2G_LODC_QI_CORE0            0x097
+#define R2057_W12G_BW_LNA2G_PUS_CORE0          0x098
+#define R2057_LNA2G_GAIN_CORE0                 0x099
+#define R2057_LNA2G_TUNE_CORE0                 0x09a
+#define R2057_RXMIX5G_PUS_CORE0                        0x09b
+#define R2057_RXMIX5G_VCMREFS_CORE0            0x09c
+#define R2057_RXMIX5G_LODC_QI_CORE0            0x09d
+#define R2057_W15G_BW_LNA5G_PUS_CORE0          0x09e
+#define R2057_LNA5G_GAIN_CORE0                 0x09f
+#define R2057_LNA5G_TUNE_CORE0                 0x0a0
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0       0x0a1
+#define R2057_RXBB_BIAS_MASTER_CORE0           0x0a2
+#define R2057_RXBB_VGABUF_IDACS_CORE0          0x0a3
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0    0x0a4
+#define R2057_TXBUF_VINCM_CORE0                        0x0a5
+#define R2057_TXBUF_IDACS_CORE0                        0x0a6
+#define R2057_LPF_RESP_RXBUF_BW_CORE0          0x0a7
+#define R2057_RXBB_CC_CORE0                    0x0a8
+#define R2057_RXBB_SPARE3_CORE0                        0x0a9
+#define R2057_RXBB_RCCAL_HPC_CORE0             0x0aa
+#define R2057_LPF_IDACS_CORE0                  0x0ab
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0     0x0ac
+#define R2057_TXBUF_GAIN_CORE0                 0x0ad
+#define R2057_AFELOOPBACK_AACI_RESP_CORE0      0x0ae
+#define R2057_RXBUF_DEGEN_CORE0                        0x0af
+#define R2057_RXBB_SPARE2_CORE0                        0x0b0
+#define R2057_RXBB_SPARE1_CORE0                        0x0b1
+#define R2057_RSSI_MASTER_CORE0                        0x0b2
+#define R2057_W2_MASTER_CORE0                  0x0b3
+#define R2057_NB_MASTER_CORE0                  0x0b4
+#define R2057_W2_IDACS0_Q_CORE0                        0x0b5
+#define R2057_W2_IDACS1_Q_CORE0                        0x0b6
+#define R2057_W2_IDACS0_I_CORE0                        0x0b7
+#define R2057_W2_IDACS1_I_CORE0                        0x0b8
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0     0x0b9
+#define R2057_NB_IDACS_Q_CORE0                 0x0ba
+#define R2057_NB_IDACS_I_CORE0                 0x0bb
+#define R2057_BACKUP4_CORE0                    0x0c1
+#define R2057_BACKUP3_CORE0                    0x0c2
+#define R2057_BACKUP2_CORE0                    0x0c3
+#define R2057_BACKUP1_CORE0                    0x0c4
+#define R2057_SPARE16_CORE0                    0x0c5
+#define R2057_SPARE15_CORE0                    0x0c6
+#define R2057_SPARE14_CORE0                    0x0c7
+#define R2057_SPARE13_CORE0                    0x0c8
+#define R2057_SPARE12_CORE0                    0x0c9
+#define R2057_SPARE11_CORE0                    0x0ca
+#define R2057_TX2G_BIAS_RESETS_CORE0           0x0cb
+#define R2057_TX5G_BIAS_RESETS_CORE0           0x0cc
+#define R2057_IQTEST_SEL_PU                    0x0cd
+#define R2057_XTAL_CONFIG2                     0x0ce
+#define R2057_BUFS_MISC_LPFBW_CORE0            0x0cf
+#define R2057_TXLPF_RCCAL_CORE0                        0x0d0
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0  0x0d1
+#define R2057_LPF_GAIN_CORE0                   0x0d2
+#define R2057_DACBUF_IDACS_BW_CORE0            0x0d3
+#define R2057_RXTXBIAS_CONFIG_CORE1            0x0d4
+#define R2057_TXGM_TXRF_PUS_CORE1              0x0d5
+#define R2057_TXGM_IDAC_BLEED_CORE1            0x0d6
+#define R2057_TXGM_GAIN_CORE1                  0x0db
+#define R2057_TXGM2G_PKDET_PUS_CORE1           0x0dc
+#define R2057_PAD2G_PTATS_CORE1                        0x0dd
+#define R2057_PAD2G_IDACS_CORE1                        0x0de
+#define R2057_PAD2G_BOOST_PU_CORE1             0x0df
+#define R2057_PAD2G_CASCV_GAIN_CORE1           0x0e0
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1      0x0e1
+#define R2057_TXMIX2G_LODC_CORE1               0x0e2
+#define R2057_PAD2G_TUNE_PUS_CORE1             0x0e3
+#define R2057_IPA2G_GAIN_CORE1                 0x0e4
+#define R2057_TSSI2G_SPARE1_CORE1              0x0e5
+#define R2057_TSSI2G_SPARE2_CORE1              0x0e6
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1     0x0e7
+#define R2057_IPA2G_IMAIN_CORE1                        0x0e8
+#define R2057_IPA2G_CASCONV_CORE1              0x0e9
+#define R2057_IPA2G_CASCOFFV_CORE1             0x0ea
+#define R2057_IPA2G_BIAS_FILTER_CORE1          0x0eb
+#define R2057_TX5G_PKDET_CORE1                 0x0ee
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE1         0x0ef
+#define R2057_PAD5G_PTATS1_CORE1               0x0f0
+#define R2057_PAD5G_CLASS_PTATS2_CORE1         0x0f1
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1                0x0f2
+#define R2057_PAD5G_CASCV_IMAIN_CORE1          0x0f3
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1    0x0f4
+#define R2057_PGA_BOOST_TUNE_CORE1             0x0f5
+#define R2057_PGA_GAIN_CORE1                   0x0f6
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1    0x0f7
+#define R2057_TXMIX5G_BOOST_TUNE_CORE1         0x0f8
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE1                0x0f9
+#define R2057_IPA5G_IAUX_CORE1                 0x0fa
+#define R2057_IPA5G_GAIN_CORE1                 0x0fb
+#define R2057_TSSI5G_SPARE1_CORE1              0x0fc
+#define R2057_TSSI5G_SPARE2_CORE1              0x0fd
+#define R2057_IPA5G_CASCOFFV_PU_CORE1          0x0fe
+#define R2057_IPA5G_PTAT_CORE1                 0x0ff
+#define R2057_IPA5G_IMAIN_CORE1                        0x100
+#define R2057_IPA5G_CASCONV_CORE1              0x101
+#define R2057_IPA5G_BIAS_FILTER_CORE1          0x102
+#define R2057_PAD_BIAS_FILTER_BWS_CORE1                0x105
+#define R2057_TR2G_CONFIG1_CORE1_NU            0x106
+#define R2057_TR2G_CONFIG2_CORE1_NU            0x107
+#define R2057_LNA5G_RFEN_CORE1                 0x108
+#define R2057_TR5G_CONFIG2_CORE1_NU            0x109
+#define R2057_RXRFBIAS_IBOOST_PU_CORE1         0x10a
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1        0x10b
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1     0x10c
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1      0x10d
+#define R2057_RXMIX_CMFBITAIL_PU_CORE1         0x10e
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE1         0x10f
+#define R2057_LNA2_IAUX_PTAT_CORE1             0x110
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE1         0x111
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1    0x112
+#define R2057_RXRFBIAS_BANDSEL_CORE1           0x113
+#define R2057_TIA_CONFIG_CORE1                 0x114
+#define R2057_TIA_IQGAIN_CORE1                 0x115
+#define R2057_TIA_IBIAS2_CORE1                 0x116
+#define R2057_TIA_IBIAS1_CORE1                 0x117
+#define R2057_TIA_SPARE_Q_CORE1                        0x118
+#define R2057_TIA_SPARE_I_CORE1                        0x119
+#define R2057_RXMIX2G_PUS_CORE1                        0x11a
+#define R2057_RXMIX2G_VCMREFS_CORE1            0x11b
+#define R2057_RXMIX2G_LODC_QI_CORE1            0x11c
+#define R2057_W12G_BW_LNA2G_PUS_CORE1          0x11d
+#define R2057_LNA2G_GAIN_CORE1                 0x11e
+#define R2057_LNA2G_TUNE_CORE1                 0x11f
+#define R2057_RXMIX5G_PUS_CORE1                        0x120
+#define R2057_RXMIX5G_VCMREFS_CORE1            0x121
+#define R2057_RXMIX5G_LODC_QI_CORE1            0x122
+#define R2057_W15G_BW_LNA5G_PUS_CORE1          0x123
+#define R2057_LNA5G_GAIN_CORE1                 0x124
+#define R2057_LNA5G_TUNE_CORE1                 0x125
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1       0x126
+#define R2057_RXBB_BIAS_MASTER_CORE1           0x127
+#define R2057_RXBB_VGABUF_IDACS_CORE1          0x128
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1    0x129
+#define R2057_TXBUF_VINCM_CORE1                        0x12a
+#define R2057_TXBUF_IDACS_CORE1                        0x12b
+#define R2057_LPF_RESP_RXBUF_BW_CORE1          0x12c
+#define R2057_RXBB_CC_CORE1                    0x12d
+#define R2057_RXBB_SPARE3_CORE1                        0x12e
+#define R2057_RXBB_RCCAL_HPC_CORE1             0x12f
+#define R2057_LPF_IDACS_CORE1                  0x130
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1     0x131
+#define R2057_TXBUF_GAIN_CORE1                 0x132
+#define R2057_AFELOOPBACK_AACI_RESP_CORE1      0x133
+#define R2057_RXBUF_DEGEN_CORE1                        0x134
+#define R2057_RXBB_SPARE2_CORE1                        0x135
+#define R2057_RXBB_SPARE1_CORE1                        0x136
+#define R2057_RSSI_MASTER_CORE1                        0x137
+#define R2057_W2_MASTER_CORE1                  0x138
+#define R2057_NB_MASTER_CORE1                  0x139
+#define R2057_W2_IDACS0_Q_CORE1                        0x13a
+#define R2057_W2_IDACS1_Q_CORE1                        0x13b
+#define R2057_W2_IDACS0_I_CORE1                        0x13c
+#define R2057_W2_IDACS1_I_CORE1                        0x13d
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1     0x13e
+#define R2057_NB_IDACS_Q_CORE1                 0x13f
+#define R2057_NB_IDACS_I_CORE1                 0x140
+#define R2057_BACKUP4_CORE1                    0x146
+#define R2057_BACKUP3_CORE1                    0x147
+#define R2057_BACKUP2_CORE1                    0x148
+#define R2057_BACKUP1_CORE1                    0x149
+#define R2057_SPARE16_CORE1                    0x14a
+#define R2057_SPARE15_CORE1                    0x14b
+#define R2057_SPARE14_CORE1                    0x14c
+#define R2057_SPARE13_CORE1                    0x14d
+#define R2057_SPARE12_CORE1                    0x14e
+#define R2057_SPARE11_CORE1                    0x14f
+#define R2057_TX2G_BIAS_RESETS_CORE1           0x150
+#define R2057_TX5G_BIAS_RESETS_CORE1           0x151
+#define R2057_SPARE8_CORE1                     0x152
+#define R2057_SPARE7_CORE1                     0x153
+#define R2057_BUFS_MISC_LPFBW_CORE1            0x154
+#define R2057_TXLPF_RCCAL_CORE1                        0x155
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1  0x156
+#define R2057_LPF_GAIN_CORE1                   0x157
+#define R2057_DACBUF_IDACS_BW_CORE1            0x158
+#define R2057_DACBUF_VINCM_CORE1               0x159
+#define R2057_RCCAL_START_R1_Q1_P1             0x15a
+#define R2057_RCCAL_X1                         0x15b
+#define R2057_RCCAL_TRC0                       0x15c
+#define R2057_RCCAL_TRC1                       0x15d
+#define R2057_RCCAL_DONE_OSCCAP                        0x15e
+#define R2057_RCCAL_N0_0                       0x15f
+#define R2057_RCCAL_N0_1                       0x160
+#define R2057_RCCAL_N1_0                       0x161
+#define R2057_RCCAL_N1_1                       0x162
+#define R2057_RCAL_STATUS                      0x163
+#define R2057_XTALPUOVR_PINCTRL                        0x164
+#define R2057_OVR_REG0                         0x165
+#define R2057_OVR_REG1                         0x166
+#define R2057_OVR_REG2                         0x167
+#define R2057_OVR_REG3                         0x168
+#define R2057_OVR_REG4                         0x169
+#define R2057_RCCAL_SCAP_VAL                   0x16a
+#define R2057_RCCAL_BCAP_VAL                   0x16b
+#define R2057_RCCAL_HPC_VAL                    0x16c
+#define R2057_RCCAL_OVERRIDES                  0x16d
+#define R2057_TX0_IQCAL_GAIN_BW                        0x170
+#define R2057_TX0_LOFT_FINE_I                  0x171
+#define R2057_TX0_LOFT_FINE_Q                  0x172
+#define R2057_TX0_LOFT_COARSE_I                        0x173
+#define R2057_TX0_LOFT_COARSE_Q                        0x174
+#define R2057_TX0_TX_SSI_MASTER                        0x175
+#define R2057_TX0_IQCAL_VCM_HG                 0x176
+#define R2057_TX0_IQCAL_IDAC                   0x177
+#define R2057_TX0_TSSI_VCM                     0x178
+#define R2057_TX0_TX_SSI_MUX                   0x179
+#define R2057_TX0_TSSIA                                0x17a
+#define R2057_TX0_TSSIG                                0x17b
+#define R2057_TX0_TSSI_MISC1                   0x17c
+#define R2057_TX0_TXRXCOUPLE_2G_ATTEN          0x17d
+#define R2057_TX0_TXRXCOUPLE_2G_PWRUP          0x17e
+#define R2057_TX0_TXRXCOUPLE_5G_ATTEN          0x17f
+#define R2057_TX0_TXRXCOUPLE_5G_PWRUP          0x180
+#define R2057_TX1_IQCAL_GAIN_BW                        0x190
+#define R2057_TX1_LOFT_FINE_I                  0x191
+#define R2057_TX1_LOFT_FINE_Q                  0x192
+#define R2057_TX1_LOFT_COARSE_I                        0x193
+#define R2057_TX1_LOFT_COARSE_Q                        0x194
+#define R2057_TX1_TX_SSI_MASTER                        0x195
+#define R2057_TX1_IQCAL_VCM_HG                 0x196
+#define R2057_TX1_IQCAL_IDAC                   0x197
+#define R2057_TX1_TSSI_VCM                     0x198
+#define R2057_TX1_TX_SSI_MUX                   0x199
+#define R2057_TX1_TSSIA                                0x19a
+#define R2057_TX1_TSSIG                                0x19b
+#define R2057_TX1_TSSI_MISC1                   0x19c
+#define R2057_TX1_TXRXCOUPLE_2G_ATTEN          0x19d
+#define R2057_TX1_TXRXCOUPLE_2G_PWRUP          0x19e
+#define R2057_TX1_TXRXCOUPLE_5G_ATTEN          0x19f
+#define R2057_TX1_TXRXCOUPLE_5G_PWRUP          0x1a0
+#define R2057_AFE_VCM_CAL_MASTER_CORE0         0x1a1
+#define R2057_AFE_SET_VCM_I_CORE0              0x1a2
+#define R2057_AFE_SET_VCM_Q_CORE0              0x1a3
+#define R2057_AFE_STATUS_VCM_IQADC_CORE0       0x1a4
+#define R2057_AFE_STATUS_VCM_I_CORE0           0x1a5
+#define R2057_AFE_STATUS_VCM_Q_CORE0           0x1a6
+#define R2057_AFE_VCM_CAL_MASTER_CORE1         0x1a7
+#define R2057_AFE_SET_VCM_I_CORE1              0x1a8
+#define R2057_AFE_SET_VCM_Q_CORE1              0x1a9
+#define R2057_AFE_STATUS_VCM_IQADC_CORE1       0x1aa
+#define R2057_AFE_STATUS_VCM_I_CORE1           0x1ab
+#define R2057_AFE_STATUS_VCM_Q_CORE1           0x1ac
+
+#define R2057v7_DACBUF_VINCM_CORE0             0x1ad
+#define R2057v7_RCCAL_MASTER                   0x1ae
+#define R2057v7_TR2G_CONFIG3_CORE0_NU          0x1af
+#define R2057v7_TR2G_CONFIG3_CORE1_NU          0x1b0
+#define R2057v7_LOGEN_PUS1                     0x1b1
+#define R2057v7_OVR_REG5                       0x1b2
+#define R2057v7_OVR_REG6                       0x1b3
+#define R2057v7_OVR_REG7                       0x1b4
+#define R2057v7_OVR_REG8                       0x1b5
+#define R2057v7_OVR_REG9                       0x1b6
+#define R2057v7_OVR_REG10                      0x1b7
+#define R2057v7_OVR_REG11                      0x1b8
+#define R2057v7_OVR_REG12                      0x1b9
+#define R2057v7_OVR_REG13                      0x1ba
+#define R2057v7_OVR_REG14                      0x1bb
+#define R2057v7_OVR_REG15                      0x1bc
+#define R2057v7_OVR_REG16                      0x1bd
+#define R2057v7_OVR_REG1                       0x1be /* NOTE(review): sequence (REG16..REG18) suggests this was meant to be R2057v7_OVR_REG17 -- verify before use */
+#define R2057v7_OVR_REG18                      0x1bf
+#define R2057v7_OVR_REG19                      0x1c0
+#define R2057v7_OVR_REG20                      0x1c1
+#define R2057v7_OVR_REG21                      0x1c2
+#define R2057v7_OVR_REG2                       0x1c3 /* NOTE(review): sequence (REG21..REG23) suggests R2057v7_OVR_REG22 -- verify before use */
+#define R2057v7_OVR_REG23                      0x1c4
+#define R2057v7_OVR_REG24                      0x1c5
+#define R2057v7_OVR_REG25                      0x1c6
+#define R2057v7_OVR_REG26                      0x1c7
+#define R2057v7_OVR_REG27                      0x1c8
+#define R2057v7_OVR_REG28                      0x1c9
+#define R2057v7_IQTEST_SEL_PU2                 0x1ca
+
+#define R2057_VCM_MASK                         0x7
+
+void r2057_upload_inittabs(struct b43_wldev *dev);
+
+#endif /* B43_RADIO_2057_H_ */
index f0d8377429c695dc6d5cbe4342d49a4bc513ef5d..97d4e27bf36f3c3f14b336086efe770dbf671f77 100644 (file)
@@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
        { 0x00C0,  6, 0xE7, 0xF9, 0xEC, 0xFB }  /* field == 0x4000 (fls 15) */
 };
 
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over0[] = {
+       { 0x0004, 0x07A, 0x07D, 0x0002, 1 },
+       { 0x0008, 0x07A, 0x07D, 0x0004, 2 },
+       { 0x0010, 0x07A, 0x07D, 0x0010, 4 },
+       { 0x0020, 0x07A, 0x07D, 0x0020, 5 },
+       { 0x0040, 0x07A, 0x07D, 0x0040, 6 },
+       { 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
+       { 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
+       { 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
+       { 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
+       { 0x6000, 0x348, 0x349, 0xFFFF, 0 }, /* entries are matched by exact field value, so order here is irrelevant */
+       { 0x2000, 0x348, 0x349, 0x000F, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over1[] = {
+       { 0x0002, 0x340, 0x341, 0x0002, 1 },
+       { 0x0008, 0x340, 0x341, 0x0008, 3 },
+       { 0x0020, 0x340, 0x341, 0x0020, 5 },
+       { 0x0010, 0x340, 0x341, 0x0010, 4 },
+       { 0x0004, 0x340, 0x341, 0x0004, 2 },
+       { 0x0080, 0x340, 0x341, 0x0700, 8 },
+       { 0x0800, 0x340, 0x341, 0x4000, 14 },
+       { 0x0400, 0x340, 0x341, 0x2000, 13 },
+       { 0x0200, 0x340, 0x341, 0x0800, 12 }, /* NOTE(review): shift 12 vs mask bit 11 breaks the pattern above -- verify against specs */
+       { 0x0100, 0x340, 0x341, 0x0100, 11 }, /* NOTE(review): shift 11 vs mask bit 8 breaks the pattern -- verify against specs */
+       { 0x0040, 0x340, 0x341, 0x0040, 6 },
+       { 0x0001, 0x340, 0x341, 0x0001, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over2[] = {
+       { 0x0008, 0x344, 0x345, 0x0008, 3 },
+       { 0x0002, 0x344, 0x345, 0x0002, 1 },
+       { 0x0001, 0x344, 0x345, 0x0001, 0 },
+       { 0x0004, 0x344, 0x345, 0x0004, 2 },
+       { 0x0010, 0x344, 0x345, 0x0010, 4 },
+};
+
 struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
        { 10, 14, 19, 27 },
        { -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 
        return e;
 }
+
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+       struct b43_wldev *dev, u16 field, u8 override)
+{
+       const struct nphy_rf_control_override_rev7 *e;
+       u8 size, i;
+
+       switch (override) {
+       case 0:
+               e = tbl_rf_control_override_rev7_over0;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
+               break;
+       case 1:
+               e = tbl_rf_control_override_rev7_over1;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
+               break;
+       case 2:
+               e = tbl_rf_control_override_rev7_over2;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
+               break;
+       default:
+               b43err(dev->wl, "Invalid override value %d\n", override);
+               return NULL;
+       }
+
+       for (i = 0; i < size; i++) {
+               if (e[i].field == field)
+                       return &e[i];
+       }
+
+       return NULL;
+}
index f348953c02308b5c29f4048f5e4e87ebff8a37cb..c600700ceedc05ae9cae24e0f221d927b5310fa7 100644 (file)
@@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 {
        u8 val_addr1;
 };
 
+struct nphy_rf_control_override_rev7 {
+       u16 field;
+       u16 val_addr_core0;
+       u16 val_addr_core1;
+       u16 val_mask;
+       u8 val_shift;
+};
+
 struct nphy_gain_ctl_workaround_entry {
        s8 lna1_gain[4];
        s8 lna2_gain[4];
@@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2
        tbl_rf_control_override_rev2[];
 extern const struct nphy_rf_control_override_rev3
        tbl_rf_control_override_rev3[];
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+       struct b43_wldev *dev, u16 field, u8 override);
 
 #endif /* B43_TABLES_NPHY_H_ */
index 8156135a0590775311baa7936f723f6f28242ff1..18e208e3eca1c4be2ec3e7a8bbf02056d71132ff 100644 (file)
@@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
                return 0;
        ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
                    (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
-                    & mask) | set);
+                    & ~mask) | set);
 
        return 0;
 }
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
 }
 
 static void b43legacy_op_tx(struct ieee80211_hw *hw,
+                           struct ieee80211_tx_control *control,
                            struct sk_buff *skb)
 {
        struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
@@ -3894,6 +3895,8 @@ static void b43legacy_remove(struct ssb_device *dev)
        cancel_work_sync(&wl->firmware_load);
 
        B43legacy_WARN_ON(!wl);
+       if (!wldev->fw.ucode)
+               return;                 /* NULL if fw never loaded */
        if (wl->current_dev == wldev)
                ieee80211_unregister_hw(wl->hw);
 
index b480088b3dbe2e0834a28228341a66a74c79bee0..c9d811eb6556bfa4494155391398c2bb2b1df815 100644 (file)
@@ -55,6 +55,14 @@ config BRCMFMAC_USB
          IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
          use the driver for an USB wireless card.
 
+config BRCMISCAN
+       bool "Broadcom I-Scan (OBSOLETE)"
+       depends on BRCMFMAC
+       ---help---
+         This option enables the I-Scan method. By default fullmac uses the
+         new E-Scan method which uses less memory in firmware and gives no
+         limitation on the number of scan results.
+
 config BRCMDBG
        bool "Broadcom driver debug functions"
        depends on BRCMSMAC || BRCMFMAC
index 8e7e6928c93699bf9b7df35f9efc4c9b26928481..3b2c4c20e7fcfcccaa2a6706108473f805c0f13b 100644 (file)
@@ -185,7 +185,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
        return err;
 }
 
-static int
+int
 brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                        void *data, bool write)
 {
@@ -249,7 +249,9 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+       sdio_release_host(sdiodev->func[1]);
        brcmf_dbg(INFO, "data:0x%02x\n", data);
 
        if (ret)
@@ -264,7 +266,9 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+       sdio_release_host(sdiodev->func[1]);
        brcmf_dbg(INFO, "data:0x%08x\n", data);
 
        if (ret)
@@ -279,7 +283,9 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+       sdio_release_host(sdiodev->func[1]);
 
        if (ret)
                *ret = retval;
@@ -291,7 +297,9 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+       sdio_release_host(sdiodev->func[1]);
 
        if (ret)
                *ret = retval;
@@ -356,15 +364,20 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pkt->len);
 
+       sdio_claim_host(sdiodev->func[1]);
+
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
        if (err)
-               return err;
+               goto done;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
        err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
                                         fn, addr, pkt);
 
+done:
+       sdio_release_host(sdiodev->func[1]);
+
        return err;
 }
 
@@ -378,15 +391,20 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pktq->qlen);
 
+       sdio_claim_host(sdiodev->func[1]);
+
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
        if (err)
-               return err;
+               goto done;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
        err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
                                        pktq);
 
+done:
+       sdio_release_host(sdiodev->func[1]);
+
        return err;
 }
 
@@ -428,10 +446,12 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (flags & SDIO_REQ_ASYNC)
                return -ENOTSUPP;
 
+       sdio_claim_host(sdiodev->func[1]);
+
        if (bar0 != sdiodev->sbwad) {
                err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
                if (err)
-                       return err;
+                       goto done;
 
                sdiodev->sbwad = bar0;
        }
@@ -443,8 +463,13 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (width == 4)
                addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-       return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
-                                         addr, pkt);
+       err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
+                                        addr, pkt);
+
+done:
+       sdio_release_host(sdiodev->func[1]);
+
+       return err;
 }
 
 int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
@@ -485,8 +510,10 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
        brcmf_dbg(TRACE, "Enter\n");
 
        /* issue abort cmd52 command through F0 */
+       sdio_claim_host(sdiodev->func[1]);
        brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
                                 SDIO_CCCR_ABORT, &t_func);
+       sdio_release_host(sdiodev->func[1]);
 
        brcmf_dbg(TRACE, "Exit\n");
        return 0;
index 7c4ee72f9d56006cb7e069a9472a0524bbebb9c9..c3247d5b3c222bfcc6ab8a05236515be073f55b6 100644 (file)
@@ -42,6 +42,7 @@
 
 #define DMA_ALIGN_MASK 0x03
 
+#define SDIO_DEVICE_ID_BROADCOM_43241  0x4324
 #define SDIO_DEVICE_ID_BROADCOM_4329   0x4329
 #define SDIO_DEVICE_ID_BROADCOM_4330   0x4330
 #define SDIO_DEVICE_ID_BROADCOM_4334   0x4334
@@ -51,6 +52,7 @@
 
 /* devices we support, null terminated */
 static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+       {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
@@ -101,7 +103,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
        if (regaddr == SDIO_CCCR_IOEx) {
                sdfunc = sdiodev->func[2];
                if (sdfunc) {
-                       sdio_claim_host(sdfunc);
                        if (*byte & SDIO_FUNC_ENABLE_2) {
                                /* Enable Function 2 */
                                err_ret = sdio_enable_func(sdfunc);
@@ -117,7 +118,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
                                                  "Disable F2 failed:%d\n",
                                                  err_ret);
                        }
-                       sdio_release_host(sdfunc);
                }
        } else if ((regaddr == SDIO_CCCR_ABORT) ||
                   (regaddr == SDIO_CCCR_IENx)) {
@@ -126,17 +126,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
                if (!sdfunc)
                        return -ENOMEM;
                sdfunc->num = 0;
-               sdio_claim_host(sdfunc);
                sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
-               sdio_release_host(sdfunc);
                kfree(sdfunc);
        } else if (regaddr < 0xF0) {
                brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
                err_ret = -EPERM;
        } else {
-               sdio_claim_host(sdfunc);
                sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
-               sdio_release_host(sdfunc);
        }
 
        return err_ret;
@@ -157,7 +153,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
                /* handle F0 separately */
                err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
        } else {
-               sdio_claim_host(sdiodev->func[func]);
                if (rw) /* CMD52 Write */
                        sdio_writeb(sdiodev->func[func], *byte, regaddr,
                                    &err_ret);
@@ -168,7 +163,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
                        *byte = sdio_readb(sdiodev->func[func], regaddr,
                                           &err_ret);
                }
-               sdio_release_host(sdiodev->func[func]);
        }
 
        if (err_ret)
@@ -195,8 +189,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
        brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
-       /* Claim host controller */
-       sdio_claim_host(sdiodev->func[func]);
 
        if (rw) {               /* CMD52 Write */
                if (nbytes == 4)
@@ -217,9 +209,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
                        brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
        }
 
-       /* Release host controller */
-       sdio_release_host(sdiodev->func[func]);
-
        if (err_ret)
                brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
                          rw ? "write" : "read", err_ret);
@@ -273,9 +262,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
-       /* Claim host controller */
-       sdio_claim_host(sdiodev->func[func]);
-
        skb_queue_walk(pktq, pkt) {
                uint pkt_len = pkt->len;
                pkt_len += 3;
@@ -298,9 +284,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
                SGCount++;
        }
 
-       /* Release host controller */
-       sdio_release_host(sdiodev->func[func]);
-
        brcmf_dbg(TRACE, "Exit\n");
        return err_ret;
 }
@@ -326,9 +309,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
-       /* Claim host controller */
-       sdio_claim_host(sdiodev->func[func]);
-
        pkt_len += 3;
        pkt_len &= (uint)~3;
 
@@ -342,9 +322,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
                          write ? "TX" : "RX", pkt, addr, pkt_len);
        }
 
-       /* Release host controller */
-       sdio_release_host(sdiodev->func[func]);
-
        return status;
 }
 
index a11fe54f595091dbec82f57066a2be6bc29bf26c..17e7ae73e0089600780a54a35ab0e7c75dc08a87 100644 (file)
@@ -27,6 +27,7 @@
  * IO codes that are interpreted by dongle firmware
  ******************************************************************************/
 #define BRCMF_C_UP                             2
+#define BRCMF_C_DOWN                           3
 #define BRCMF_C_SET_PROMISC                    10
 #define BRCMF_C_GET_RATE                       12
 #define BRCMF_C_GET_INFRA                      19
 #define BRCMF_C_REASSOC                                53
 #define BRCMF_C_SET_ROAM_TRIGGER               55
 #define BRCMF_C_SET_ROAM_DELTA                 57
+#define BRCMF_C_GET_BCNPRD                     75
+#define BRCMF_C_SET_BCNPRD                     76
 #define BRCMF_C_GET_DTIMPRD                    77
+#define BRCMF_C_SET_DTIMPRD                    78
 #define BRCMF_C_SET_COUNTRY                    84
 #define BRCMF_C_GET_PM                         85
 #define BRCMF_C_SET_PM                         86
 #define BRCMF_EVENT_MSG_FLUSHTXQ       0x02
 #define BRCMF_EVENT_MSG_GROUP          0x04
 
+#define BRCMF_ESCAN_REQ_VERSION 1
+
+#define WLC_BSS_RSSI_ON_CHANNEL                0x0002
+
+#define BRCMF_MAXRATES_IN_SET          16      /* max # of rates in rateset */
+#define BRCMF_STA_ASSOC                        0x10            /* Associated */
+
 struct brcmf_event_msg {
        __be16 version;
        __be16 flags;
@@ -140,6 +151,8 @@ struct brcmf_event_msg {
        __be32 datalen;
        u8 addr[ETH_ALEN];
        char ifname[IFNAMSIZ];
+       u8 ifidx;
+       u8 bsscfgidx;
 } __packed;
 
 struct brcm_ethhdr {
@@ -454,6 +467,24 @@ struct brcmf_scan_results_le {
        __le32 count;
 };
 
+struct brcmf_escan_params_le {
+       __le32 version;
+       __le16 action;
+       __le16 sync_id;
+       struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+       __le32 buflen;
+       __le32 version;
+       __le16 sync_id;
+       __le16 bss_count;
+       struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+       sizeof(struct brcmf_bss_info_le))
+
 /* used for association with a specific BSSID and chanspec list */
 struct brcmf_assoc_params_le {
        /* 00:00:00:00:00:00: broadcast scan */
@@ -542,6 +573,28 @@ struct brcmf_channel_info_le {
        __le32 scan_channel;
 };
 
+struct brcmf_sta_info_le {
+       __le16  ver;            /* version of this struct */
+       __le16  len;            /* length in bytes of this structure */
+       __le16  cap;            /* sta's advertised capabilities */
+       __le32  flags;          /* flags defined below */
+       __le32  idle;           /* time since data pkt rx'd from sta */
+       u8      ea[ETH_ALEN];           /* Station address */
+       __le32  count;                  /* # rates in this set */
+       u8      rates[BRCMF_MAXRATES_IN_SET];   /* rates in 500kbps units */
+                                               /* w/hi bit set if basic */
+       __le32  in;             /* seconds elapsed since associated */
+       __le32  listen_interval_inms; /* Min Listen interval in ms for STA */
+       __le32  tx_pkts;        /* # of packets transmitted */
+       __le32  tx_failures;    /* # of packets failed */
+       __le32  rx_ucast_pkts;  /* # of unicast packets received */
+       __le32  rx_mcast_pkts;  /* # of multicast packets received */
+       __le32  tx_rate;        /* Rate of last successful tx frame */
+       __le32  rx_rate;        /* Rate of last successful rx frame */
+       __le32  rx_decrypt_succeeds;    /* # of packet decrypted successfully */
+       __le32  rx_decrypt_failures;    /* # of packet decrypted failed */
+};
+
 /* Bus independent dongle command */
 struct brcmf_dcmd {
        uint cmd;               /* common dongle cmd definition */
@@ -561,7 +614,7 @@ struct brcmf_pub {
        /* Linkage ponters */
        struct brcmf_bus *bus_if;
        struct brcmf_proto *prot;
-       struct brcmf_cfg80211_dev *config;
+       struct brcmf_cfg80211_info *config;
        struct device *dev;             /* fullmac dongle device pointer */
 
        /* Internal brcmf items */
@@ -634,10 +687,13 @@ extern const struct bcmevent_name bcmevent_names[];
 
 extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
                          char *buf, uint len);
+extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
+                                  char *buf, uint buflen, s32 bssidx);
 
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
+extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
 
 /* Return pointer to interface name */
 extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -657,10 +713,6 @@ extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
 
 extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
 
-/* Send packet to dongle via data channel */
-extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
-                        struct sk_buff *pkt);
-
 extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
 extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
                                             int enable, int master_mode);
index 537f499cc5d26f1174747443e863f798177b3377..9b8ee19ea55d12ccf45a644e139490dd5d2b4843 100644 (file)
@@ -103,7 +103,7 @@ extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
 extern void brcmf_detach(struct device *dev);
 
 /* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
+extern void brcmf_txflowblock(struct device *dev, bool state);
 
 /* Notify tx completion */
 extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
index 6f70953f0bade06ef046a845829a887611c0a8b8..15c5db5752d199d2ec4c78913c44a5713dfb78a8 100644 (file)
@@ -80,12 +80,60 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
        strncpy(buf, name, buflen);
 
        /* append data onto the end of the name string */
-       memcpy(&buf[len], data, datalen);
-       len += datalen;
+       if (data && datalen) {
+               memcpy(&buf[len], data, datalen);
+               len += datalen;
+       }
 
        return len;
 }
 
+uint
+brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
+                      char *buf, uint buflen, s32 bssidx)
+{
+       const s8 *prefix = "bsscfg:";
+       s8 *p;
+       u32 prefixlen;
+       u32 namelen;
+       u32 iolen;
+       __le32 bssidx_le;
+
+       if (bssidx == 0)
+               return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
+
+       prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+       namelen = (u32) strlen(name) + 1; /* length of iovar  name + null */
+       iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+       if (buflen < 0 || iolen > (u32)buflen) {
+               brcmf_dbg(ERROR, "buffer is too short\n");
+               return 0;
+       }
+
+       p = buf;
+
+       /* copy prefix, no null */
+       memcpy(p, prefix, prefixlen);
+       p += prefixlen;
+
+       /* copy iovar name including null */
+       memcpy(p, name, namelen);
+       p += namelen;
+
+       /* bss config index as first data */
+       bssidx_le = cpu_to_le32(bssidx);
+       memcpy(p, &bssidx_le, sizeof(bssidx_le));
+       p += sizeof(bssidx_le);
+
+       /* parameter buffer follows */
+       if (datalen)
+               memcpy(p, data, datalen);
+
+       return iolen;
+
+}
+
 bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
                      struct sk_buff *pkt, int prec)
 {
@@ -205,7 +253,8 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
                BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
                BRCMF_E_IF, "IF"}, {
                BRCMF_E_RSSI, "RSSI"}, {
-               BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
+               BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
+               BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
        };
        uint event_type, flags, auth_type, datalen;
        static u32 seqnum_prev;
@@ -350,6 +399,11 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
                brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
                break;
 
+       case BRCMF_E_ESCAN_RESULT:
+               brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
+               datalen = 0;
+               break;
+
        case BRCMF_E_PFN_NET_FOUND:
        case BRCMF_E_PFN_NET_LOST:
        case BRCMF_E_PFN_SCAN_COMPLETE:
@@ -425,13 +479,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
        }
 
        /* show any appended data */
-       if (datalen) {
-               buf = (unsigned char *) event_data;
-               brcmf_dbg(EVENT, " data (%d) : ", datalen);
-               for (i = 0; i < datalen; i++)
-                       brcmf_dbg(EVENT, " 0x%02x ", *buf++);
-               brcmf_dbg(EVENT, "\n");
-       }
+       brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
 }
 #endif                         /* DEBUG */
 
@@ -522,8 +570,9 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
        }
 
 #ifdef DEBUG
-       brcmf_c_show_host_event(event, event_data);
-#endif                         /* DEBUG */
+       if (BRCMF_EVENT_ON())
+               brcmf_c_show_host_event(event, event_data);
+#endif /* DEBUG */
 
        return 0;
 }
index b784920532d31b3ae9deb1ca0453244efb010b39..fb508c2256ddc8a7244b61ad1e6b3fd13b41bf30 100644 (file)
@@ -55,6 +55,7 @@ do {                                                                  \
 #define BRCMF_HDRS_ON()                (brcmf_msg_level & BRCMF_HDRS_VAL)
 #define BRCMF_BYTES_ON()       (brcmf_msg_level & BRCMF_BYTES_VAL)
 #define BRCMF_GLOM_ON()                (brcmf_msg_level & BRCMF_GLOM_VAL)
+#define BRCMF_EVENT_ON()       (brcmf_msg_level & BRCMF_EVENT_VAL)
 
 #else  /* (defined DEBUG) || (defined DEBUG) */
 
@@ -65,6 +66,7 @@ do {                                                                  \
 #define BRCMF_HDRS_ON()                0
 #define BRCMF_BYTES_ON()       0
 #define BRCMF_GLOM_ON()                0
+#define BRCMF_EVENT_ON()       0
 
 #endif                         /* defined(DEBUG) */
 
index 9ab24528f9b9c0e54d40854ae6e43e8ffd9f654c..d7c76ce9d8cb3d74dccb79c863e5db1efd9a7446 100644 (file)
@@ -272,30 +272,6 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
        schedule_work(&drvr->multicast_work);
 }
 
-int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
-{
-       /* Reject if down */
-       if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
-               return -ENODEV;
-
-       /* Update multicast statistic */
-       if (pktbuf->len >= ETH_ALEN) {
-               u8 *pktdata = (u8 *) (pktbuf->data);
-               struct ethhdr *eh = (struct ethhdr *)pktdata;
-
-               if (is_multicast_ether_addr(eh->h_dest))
-                       drvr->tx_multicast++;
-               if (ntohs(eh->h_proto) == ETH_P_PAE)
-                       atomic_inc(&drvr->pend_8021x_cnt);
-       }
-
-       /* If the protocol uses a data header, apply it */
-       brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
-
-       /* Use bus module to send data frame */
-       return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
-}
-
 static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        int ret;
@@ -338,7 +314,22 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                }
        }
 
-       ret = brcmf_sendpkt(drvr, ifp->idx, skb);
+       /* Update multicast statistic */
+       if (skb->len >= ETH_ALEN) {
+               u8 *pktdata = (u8 *)(skb->data);
+               struct ethhdr *eh = (struct ethhdr *)pktdata;
+
+               if (is_multicast_ether_addr(eh->h_dest))
+                       drvr->tx_multicast++;
+               if (ntohs(eh->h_proto) == ETH_P_PAE)
+                       atomic_inc(&drvr->pend_8021x_cnt);
+       }
+
+       /* If the protocol uses a data header, apply it */
+       brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+
+       /* Use bus module to send data frame */
+       ret =  drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb);
 
 done:
        if (ret)
@@ -350,19 +341,23 @@ done:
        return 0;
 }
 
-void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
+void brcmf_txflowblock(struct device *dev, bool state)
 {
        struct net_device *ndev;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
+       int i;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       ndev = drvr->iflist[ifidx]->ndev;
-       if (state == ON)
-               netif_stop_queue(ndev);
-       else
-               netif_wake_queue(ndev);
+       for (i = 0; i < BRCMF_MAX_IFS; i++)
+               if (drvr->iflist[i]) {
+                       ndev = drvr->iflist[i]->ndev;
+                       if (state)
+                               netif_stop_queue(ndev);
+                       else
+                               netif_wake_queue(ndev);
+               }
 }
 
 static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
@@ -775,6 +770,14 @@ done:
        return err;
 }
 
+int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
+{
+       brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
+                 dcmd->cmd, dcmd->buf, dcmd->len);
+
+       return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
+}
+
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
index 472f2ef5c65237b9bb52f577723a3fed734a5041..3564686add9a1099aa048c5ba4526f5052288abd 100644 (file)
@@ -482,6 +482,15 @@ struct sdpcm_shared_le {
        __le32 brpt_addr;
 };
 
+/* SDIO read frame info */
+struct brcmf_sdio_read {
+       u8 seq_num;
+       u8 channel;
+       u16 len;
+       u16 len_left;
+       u16 len_nxtfrm;
+       u8 dat_offset;
+};
 
 /* misc chip info needed by some of the routines */
 /* Private data for SDIO bus interaction */
@@ -494,9 +503,8 @@ struct brcmf_sdio {
        u32 ramsize;            /* Size of RAM in SOCRAM (bytes) */
 
        u32 hostintmask;        /* Copy of Host Interrupt Mask */
-       u32 intstatus;  /* Intstatus bits (events) pending */
-       bool dpc_sched;         /* Indicates DPC schedule (intrpt rcvd) */
-       bool fcstate;           /* State of dongle flow-control */
+       atomic_t intstatus;     /* Intstatus bits (events) pending */
+       atomic_t fcstate;       /* State of dongle flow-control */
 
        uint blocksize;         /* Block size of SDIO transfers */
        uint roundup;           /* Max roundup limit */
@@ -508,9 +516,11 @@ struct brcmf_sdio {
 
        u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
        u8 *rxhdr;              /* Header of current rx frame (in hdrbuf) */
-       u16 nextlen;            /* Next Read Len from last header */
        u8 rx_seq;              /* Receive sequence number (expected) */
+       struct brcmf_sdio_read cur_read;
+                               /* info of current read frame */
        bool rxskip;            /* Skip receive (awaiting NAK ACK) */
+       bool rxpending;         /* Data frame pending in dongle */
 
        uint rxbound;           /* Rx frames to read before resched */
        uint txbound;           /* Tx frames to send before resched */
@@ -531,7 +541,7 @@ struct brcmf_sdio {
 
        bool intr;              /* Use interrupts */
        bool poll;              /* Use polling */
-       bool ipend;             /* Device interrupt is pending */
+       atomic_t ipend;         /* Device interrupt is pending */
        uint spurious;          /* Count of spurious interrupts */
        uint pollrate;          /* Ticks between device polls */
        uint polltick;          /* Tick counter */
@@ -549,12 +559,9 @@ struct brcmf_sdio {
        s32 idleclock;  /* How to set bus driver when idle */
        s32 sd_rxchain;
        bool use_rxchain;       /* If brcmf should use PKT chains */
-       bool sleeping;          /* Is SDIO bus sleeping? */
        bool rxflow_mode;       /* Rx flow control mode */
        bool rxflow;            /* Is rx flow control on */
        bool alp_only;          /* Don't use HT clock (ALP only) */
-/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
-       bool usebufpool;
 
        u8 *ctrl_frame_buf;
        u32 ctrl_frame_len;
@@ -570,8 +577,8 @@ struct brcmf_sdio {
        bool wd_timer_valid;
        uint save_ms;
 
-       struct task_struct *dpc_tsk;
-       struct completion dpc_wait;
+       struct workqueue_struct *brcmf_wq;
+       struct work_struct datawork;
        struct list_head dpc_tsklst;
        spinlock_t dpc_tl_lock;
 
@@ -657,15 +664,6 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
 
 #define HOSTINTMASK            (I_HMB_SW_MASK | I_CHIPACTIVE)
 
-/* Packet free applicable unconditionally for sdio and sdspi.
- * Conditional if bufpool was present for gspi bus.
- */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
-{
-       if (bus->usebufpool)
-               brcmu_pkt_buf_free_skb(pkt);
-}
-
 /* Turn backplane clock on or off */
 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 {
@@ -853,81 +851,6 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
        return 0;
 }
 
-static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
-{
-       int ret;
-
-       brcmf_dbg(INFO, "request %s (currently %s)\n",
-                 sleep ? "SLEEP" : "WAKE",
-                 bus->sleeping ? "SLEEP" : "WAKE");
-
-       /* Done if we're already in the requested state */
-       if (sleep == bus->sleeping)
-               return 0;
-
-       /* Going to sleep: set the alarm and turn off the lights... */
-       if (sleep) {
-               /* Don't sleep if something is pending */
-               if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
-                       return -EBUSY;
-
-               /* Make sure the controller has the bus up */
-               brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
-               /* Tell device to start using OOB wakeup */
-               ret = w_sdreg32(bus, SMB_USE_OOB,
-                               offsetof(struct sdpcmd_regs, tosbmailbox));
-               if (ret != 0)
-                       brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
-
-               /* Turn off our contribution to the HT clock request */
-               brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
-               brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
-
-               /* Isolate the bus */
-               brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-                                SBSDIO_DEVCTL_PADS_ISO, NULL);
-
-               /* Change state */
-               bus->sleeping = true;
-
-       } else {
-               /* Waking up: bus power up is ok, set local state */
-
-               brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                0, NULL);
-
-               /* Make sure the controller has the bus up */
-               brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
-               /* Send misc interrupt to indicate OOB not needed */
-               ret = w_sdreg32(bus, 0,
-                               offsetof(struct sdpcmd_regs, tosbmailboxdata));
-               if (ret == 0)
-                       ret = w_sdreg32(bus, SMB_DEV_INT,
-                               offsetof(struct sdpcmd_regs, tosbmailbox));
-
-               if (ret != 0)
-                       brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
-
-               /* Make sure we have SD bus access */
-               brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
-               /* Change state */
-               bus->sleeping = false;
-       }
-
-       return 0;
-}
-
-static void bus_wake(struct brcmf_sdio *bus)
-{
-       if (bus->sleeping)
-               brcmf_sdbrcm_bussleep(bus, false);
-}
-
 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
 {
        u32 intstatus = 0;
@@ -1056,7 +979,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
        }
 
        /* Clear partial in any case */
-       bus->nextlen = 0;
+       bus->cur_read.len = 0;
 
        /* If we can't reach the device, signal failure */
        if (err)
@@ -1108,6 +1031,96 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
        }
 }
 
+static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
+                               struct brcmf_sdio_read *rd)
+{
+       u16 len, checksum;
+       u8 rx_seq, fc, tx_seq_max;
+
+       /*
+        * 4 bytes hardware header (frame tag)
+        * Byte 0~1: Frame length
+        * Byte 2~3: Checksum, bit-wise inverse of frame length
+        */
+       len = get_unaligned_le16(header);
+       checksum = get_unaligned_le16(header + sizeof(u16));
+       /* All zero means no more to read */
+       if (!(len | checksum)) {
+               bus->rxpending = false;
+               return false;
+       }
+       if ((u16)(~(len ^ checksum))) {
+               brcmf_dbg(ERROR, "HW header checksum error\n");
+               bus->sdcnt.rx_badhdr++;
+               brcmf_sdbrcm_rxfail(bus, false, false);
+               return false;
+       }
+       if (len < SDPCM_HDRLEN) {
+               brcmf_dbg(ERROR, "HW header length error\n");
+               return false;
+       }
+       rd->len = len;
+
+       /*
+        * 8 bytes hardware header
+        * Byte 0: Rx sequence number
+        * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+        * Byte 2: Length of next data frame
+        * Byte 3: Data offset
+        * Byte 4: Flow control bits
+        * Byte 5: Maximum Sequence number allow for Tx
+        * Byte 6~7: Reserved
+        */
+       rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
+       rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
+       if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
+               brcmf_dbg(ERROR, "HW header length too long\n");
+               bus->sdiodev->bus_if->dstats.rx_errors++;
+               bus->sdcnt.rx_toolong++;
+               brcmf_sdbrcm_rxfail(bus, false, false);
+               rd->len = 0;
+               return false;
+       }
+       rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
+               brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
+               bus->sdcnt.rx_badhdr++;
+               brcmf_sdbrcm_rxfail(bus, false, false);
+               rd->len = 0;
+               return false;
+       }
+       if (rd->seq_num != rx_seq) {
+               brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
+                         rx_seq, rd->seq_num);
+               bus->sdcnt.rx_badseq++;
+               rd->seq_num = rx_seq;
+       }
+       rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+       if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
+               /* only warn for non-glom packets */
+               if (rd->channel != SDPCM_GLOM_CHANNEL)
+                       brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq);
+               rd->len_nxtfrm = 0;
+       }
+       fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       if (bus->flowcontrol != fc) {
+               if (~bus->flowcontrol & fc)
+                       bus->sdcnt.fc_xoff++;
+               if (bus->flowcontrol & ~fc)
+                       bus->sdcnt.fc_xon++;
+               bus->sdcnt.fc_rcvd++;
+               bus->flowcontrol = fc;
+       }
+       tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
+               brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq);
+               tx_seq_max = bus->tx_seq + 2;
+       }
+       bus->tx_max = tx_seq_max;
+
+       return true;
+}
+
 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
        u16 dlen, totlen;
@@ -1122,6 +1135,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
        int ifidx = 0;
        bool usechain = bus->use_rxchain;
+       u16 next_len;
 
        /* If packets, issue read(s) and send up packet chain */
        /* Return sequence numbers consumed? */
@@ -1185,10 +1199,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                if (pnext) {
                        brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
                                  totlen, num);
-                       if (BRCMF_GLOM_ON() && bus->nextlen &&
-                           totlen != bus->nextlen) {
+                       if (BRCMF_GLOM_ON() && bus->cur_read.len &&
+                           totlen != bus->cur_read.len) {
                                brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
-                                         bus->nextlen, totlen, rxseq);
+                                         bus->cur_read.len, totlen, rxseq);
                        }
                        pfirst = pnext = NULL;
                } else {
@@ -1199,7 +1213,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                /* Done with descriptor packet */
                brcmu_pkt_buf_free_skb(bus->glomd);
                bus->glomd = NULL;
-               bus->nextlen = 0;
+               bus->cur_read.len = 0;
        }
 
        /* Ok -- either we just generated a packet chain,
@@ -1272,12 +1286,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
                chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
                seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
-               bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
-               if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+               next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+               if ((next_len << 4) > MAX_RX_DATASZ) {
                        brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
-                                 bus->nextlen, seq);
-                       bus->nextlen = 0;
+                                 next_len, seq);
+                       next_len = 0;
                }
+               bus->cur_read.len = next_len << 4;
                doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
                txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
 
@@ -1378,7 +1393,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                                bus->sdcnt.rxglomfail++;
                                brcmf_sdbrcm_free_glom(bus);
                        }
-                       bus->nextlen = 0;
+                       bus->cur_read.len = 0;
                        return 0;
                }
 
@@ -1573,422 +1588,166 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
        }
 }
 
-static void
-brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
-                        struct sk_buff **pkt, u8 **rxbuf)
+static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 {
-       int sdret;              /* Return code from calls */
-
-       *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
-       if (*pkt == NULL)
-               return;
-
-       pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
-       *rxbuf = (u8 *) ((*pkt)->data);
-       /* Read the entire frame */
-       sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
-                                     SDIO_FUNC_2, F2SYNC, *pkt);
-       bus->sdcnt.f2rxdata++;
-
-       if (sdret < 0) {
-               brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
-                         rdlen, sdret);
-               brcmu_pkt_buf_free_skb(*pkt);
-               bus->sdiodev->bus_if->dstats.rx_errors++;
-               /* Force retry w/normal header read.
-                * Don't attempt NAK for
-                * gSPI
-                */
-               brcmf_sdbrcm_rxfail(bus, true, true);
-               *pkt = NULL;
-       }
-}
-
-/* Checks the header */
-static int
-brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
-                 u8 rxseq, u16 nextlen, u16 *len)
-{
-       u16 check;
-       bool len_consistent;    /* Result of comparing readahead len and
-                                  len from hw-hdr */
-
-       memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
-
-       /* Extract hardware header fields */
-       *len = get_unaligned_le16(bus->rxhdr);
-       check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
-       /* All zeros means readahead info was bad */
-       if (!(*len | check)) {
-               brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
-               goto fail;
-       }
-
-       /* Validate check bytes */
-       if ((u16)~(*len ^ check)) {
-               brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
-                         nextlen, *len, check);
-               bus->sdcnt.rx_badhdr++;
-               brcmf_sdbrcm_rxfail(bus, false, false);
-               goto fail;
-       }
-
-       /* Validate frame length */
-       if (*len < SDPCM_HDRLEN) {
-               brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
-                         *len);
-               goto fail;
-       }
-
-       /* Check for consistency with readahead info */
-       len_consistent = (nextlen != (roundup(*len, 16) >> 4));
-       if (len_consistent) {
-               /* Mismatch, force retry w/normal
-                       header (may be >4K) */
-               brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
-                         nextlen, *len, roundup(*len, 16),
-                         rxseq);
-               brcmf_sdbrcm_rxfail(bus, true, true);
-               goto fail;
-       }
-
-       return 0;
-
-fail:
-       brcmf_sdbrcm_pktfree2(bus, pkt);
-       return -EINVAL;
-}
-
-/* Return true if there may be more frames to read */
-static uint
-brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
-{
-       u16 len, check; /* Extracted hardware header fields */
-       u8 chan, seq, doff;     /* Extracted software header fields */
-       u8 fcbits;              /* Extracted fcbits from software header */
-
        struct sk_buff *pkt;            /* Packet for event or data frames */
        u16 pad;                /* Number of pad bytes to read */
-       u16 rdlen;              /* Total number of bytes to read */
-       u8 rxseq;               /* Next sequence number to expect */
        uint rxleft = 0;        /* Remaining number of frames allowed */
        int sdret;              /* Return code from calls */
-       u8 txmax;               /* Maximum tx sequence offered */
-       u8 *rxbuf;
        int ifidx = 0;
        uint rxcount = 0;       /* Total frames read */
+       struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
+       u8 head_read = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
 
        /* Not finished unless we encounter no more frames indication */
-       *finished = false;
+       bus->rxpending = true;
 
-       for (rxseq = bus->rx_seq, rxleft = maxframes;
+       for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
             !bus->rxskip && rxleft &&
             bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
-            rxseq++, rxleft--) {
+            rd->seq_num++, rxleft--) {
 
                /* Handle glomming separately */
                if (bus->glomd || !skb_queue_empty(&bus->glom)) {
                        u8 cnt;
                        brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
                                  bus->glomd, skb_peek(&bus->glom));
-                       cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
+                       cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
                        brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
-                       rxseq += cnt - 1;
+                       rd->seq_num += cnt - 1;
                        rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
                        continue;
                }
 
-               /* Try doing single read if we can */
-               if (bus->nextlen) {
-                       u16 nextlen = bus->nextlen;
-                       bus->nextlen = 0;
-
-                       rdlen = len = nextlen << 4;
-                       brcmf_pad(bus, &pad, &rdlen);
-
-                       /*
-                        * After the frame is received we have to
-                        * distinguish whether it is data
-                        * or non-data frame.
-                        */
-                       brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf);
-                       if (pkt == NULL) {
-                               /* Give up on data, request rtx of events */
-                               brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
-                                         len, rdlen, rxseq);
-                               continue;
-                       }
-
-                       if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
-                                             &len) < 0)
+               rd->len_left = rd->len;
+               /* read header first for unknown frame length */
+               if (!rd->len) {
+                       sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
+                                                     bus->sdiodev->sbwad,
+                                                     SDIO_FUNC_2, F2SYNC,
+                                                     bus->rxhdr,
+                                                     BRCMF_FIRSTREAD);
+                       bus->sdcnt.f2rxhdrs++;
+                       if (sdret < 0) {
+                               brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n",
+                                         sdret);
+                               bus->sdcnt.rx_hdrfail++;
+                               brcmf_sdbrcm_rxfail(bus, true, true);
                                continue;
-
-                       /* Extract software header fields */
-                       chan = SDPCM_PACKET_CHANNEL(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-                       seq = SDPCM_PACKET_SEQUENCE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-                       doff = SDPCM_DOFFSET_VALUE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-                       txmax = SDPCM_WINDOW_VALUE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-                       bus->nextlen =
-                           bus->rxhdr[SDPCM_FRAMETAG_LEN +
-                                      SDPCM_NEXTLEN_OFFSET];
-                       if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
-                               brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
-                                         bus->nextlen, seq);
-                               bus->nextlen = 0;
                        }
 
-                       bus->sdcnt.rx_readahead_cnt++;
-
-                       /* Handle Flow Control */
-                       fcbits = SDPCM_FCMASK_VALUE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-                       if (bus->flowcontrol != fcbits) {
-                               if (~bus->flowcontrol & fcbits)
-                                       bus->sdcnt.fc_xoff++;
-
-                               if (bus->flowcontrol & ~fcbits)
-                                       bus->sdcnt.fc_xon++;
-
-                               bus->sdcnt.fc_rcvd++;
-                               bus->flowcontrol = fcbits;
-                       }
-
-                       /* Check and update sequence number */
-                       if (rxseq != seq) {
-                               brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
-                                         seq, rxseq);
-                               bus->sdcnt.rx_badseq++;
-                               rxseq = seq;
-                       }
-
-                       /* Check window for sanity */
-                       if ((u8) (txmax - bus->tx_seq) > 0x40) {
-                               brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
-                                         txmax, bus->tx_seq);
-                               txmax = bus->tx_seq + 2;
-                       }
-                       bus->tx_max = txmax;
-
-                       brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
-                                          rxbuf, len, "Rx Data:\n");
-                       brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
-                                            BRCMF_DATA_ON()) &&
-                                          BRCMF_HDRS_ON(),
+                       brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
                                           bus->rxhdr, SDPCM_HDRLEN,
                                           "RxHdr:\n");
 
-                       if (chan == SDPCM_CONTROL_CHANNEL) {
-                               brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n",
-                                         seq);
-                               /* Force retry w/normal header read */
-                               bus->nextlen = 0;
-                               brcmf_sdbrcm_rxfail(bus, false, true);
-                               brcmf_sdbrcm_pktfree2(bus, pkt);
-                               continue;
+                       if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
+                               if (!bus->rxpending)
+                                       break;
+                               else
+                                       continue;
                        }
 
-                       /* Validate data offset */
-                       if ((doff < SDPCM_HDRLEN) || (doff > len)) {
-                               brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n",
-                                         doff, len, SDPCM_HDRLEN);
-                               brcmf_sdbrcm_rxfail(bus, false, false);
-                               brcmf_sdbrcm_pktfree2(bus, pkt);
+                       if (rd->channel == SDPCM_CONTROL_CHANNEL) {
+                               brcmf_sdbrcm_read_control(bus, bus->rxhdr,
+                                                         rd->len,
+                                                         rd->dat_offset);
+                               /* prepare the descriptor for the next read */
+                               rd->len = rd->len_nxtfrm << 4;
+                               rd->len_nxtfrm = 0;
+                               /* treat all packets as events if we don't know */
+                               rd->channel = SDPCM_EVENT_CHANNEL;
                                continue;
                        }
-
-                       /* All done with this one -- now deliver the packet */
-                       goto deliver;
-               }
-
-               /* Read frame header (hardware and software) */
-               sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-                                             SDIO_FUNC_2, F2SYNC, bus->rxhdr,
-                                             BRCMF_FIRSTREAD);
-               bus->sdcnt.f2rxhdrs++;
-
-               if (sdret < 0) {
-                       brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
-                       bus->sdcnt.rx_hdrfail++;
-                       brcmf_sdbrcm_rxfail(bus, true, true);
-                       continue;
-               }
-               brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
-                                  bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n");
-
-
-               /* Extract hardware header fields */
-               len = get_unaligned_le16(bus->rxhdr);
-               check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
-               /* All zeros means no more frames */
-               if (!(len | check)) {
-                       *finished = true;
-                       break;
-               }
-
-               /* Validate check bytes */
-               if ((u16) ~(len ^ check)) {
-                       brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
-                                 len, check);
-                       bus->sdcnt.rx_badhdr++;
-                       brcmf_sdbrcm_rxfail(bus, false, false);
-                       continue;
-               }
-
-               /* Validate frame length */
-               if (len < SDPCM_HDRLEN) {
-                       brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
-                       continue;
-               }
-
-               /* Extract software header fields */
-               chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-               seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-               doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-               txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-               /* Validate data offset */
-               if ((doff < SDPCM_HDRLEN) || (doff > len)) {
-                       brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
-                                 doff, len, SDPCM_HDRLEN, seq);
-                       bus->sdcnt.rx_badhdr++;
-                       brcmf_sdbrcm_rxfail(bus, false, false);
-                       continue;
-               }
-
-               /* Save the readahead length if there is one */
-               bus->nextlen =
-                   bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
-               if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
-                       brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
-                                 bus->nextlen, seq);
-                       bus->nextlen = 0;
-               }
-
-               /* Handle Flow Control */
-               fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-               if (bus->flowcontrol != fcbits) {
-                       if (~bus->flowcontrol & fcbits)
-                               bus->sdcnt.fc_xoff++;
-
-                       if (bus->flowcontrol & ~fcbits)
-                               bus->sdcnt.fc_xon++;
-
-                       bus->sdcnt.fc_rcvd++;
-                       bus->flowcontrol = fcbits;
-               }
-
-               /* Check and update sequence number */
-               if (rxseq != seq) {
-                       brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
-                       bus->sdcnt.rx_badseq++;
-                       rxseq = seq;
-               }
-
-               /* Check window for sanity */
-               if ((u8) (txmax - bus->tx_seq) > 0x40) {
-                       brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
-                                 txmax, bus->tx_seq);
-                       txmax = bus->tx_seq + 2;
-               }
-               bus->tx_max = txmax;
-
-               /* Call a separate function for control frames */
-               if (chan == SDPCM_CONTROL_CHANNEL) {
-                       brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
-                       continue;
-               }
-
-               /* precondition: chan is either SDPCM_DATA_CHANNEL,
-                  SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
-                  SDPCM_GLOM_CHANNEL */
-
-               /* Length to read */
-               rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
-
-               /* May pad read to blocksize for efficiency */
-               if (bus->roundup && bus->blocksize &&
-                       (rdlen > bus->blocksize)) {
-                       pad = bus->blocksize - (rdlen % bus->blocksize);
-                       if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
-                           ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
-                               rdlen += pad;
-               } else if (rdlen % BRCMF_SDALIGN) {
-                       rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+                       rd->len_left = rd->len > BRCMF_FIRSTREAD ?
+                                      rd->len - BRCMF_FIRSTREAD : 0;
+                       head_read = BRCMF_FIRSTREAD;
                }
 
-               /* Satisfy length-alignment requirements */
-               if (rdlen & (ALIGNMENT - 1))
-                       rdlen = roundup(rdlen, ALIGNMENT);
-
-               if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
-                       /* Too long -- skip this frame */
-                       brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
-                                 len, rdlen);
-                       bus->sdiodev->bus_if->dstats.rx_errors++;
-                       bus->sdcnt.rx_toolong++;
-                       brcmf_sdbrcm_rxfail(bus, false, false);
-                       continue;
-               }
+               brcmf_pad(bus, &pad, &rd->len_left);
 
-               pkt = brcmu_pkt_buf_get_skb(rdlen +
-                                           BRCMF_FIRSTREAD + BRCMF_SDALIGN);
+               pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
+                                           BRCMF_SDALIGN);
                if (!pkt) {
                        /* Give up on data, request rtx of events */
-                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
-                                 rdlen, chan);
+                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n");
                        bus->sdiodev->bus_if->dstats.rx_dropped++;
-                       brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
+                       brcmf_sdbrcm_rxfail(bus, false,
+                                           RETRYCHAN(rd->channel));
                        continue;
                }
+               skb_pull(pkt, head_read);
+               pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
 
-               /* Leave room for what we already read, and align remainder */
-               skb_pull(pkt, BRCMF_FIRSTREAD);
-               pkt_align(pkt, rdlen, BRCMF_SDALIGN);
-
-               /* Read the remaining frame data */
                sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
                                              SDIO_FUNC_2, F2SYNC, pkt);
                bus->sdcnt.f2rxdata++;
 
                if (sdret < 0) {
-                       brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
-                                 ((chan == SDPCM_EVENT_CHANNEL) ? "event"
-                                  : ((chan == SDPCM_DATA_CHANNEL) ? "data"
-                                     : "test")), sdret);
+                       brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
+                                 rd->len, rd->channel, sdret);
                        brcmu_pkt_buf_free_skb(pkt);
                        bus->sdiodev->bus_if->dstats.rx_errors++;
-                       brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
+                       brcmf_sdbrcm_rxfail(bus, true,
+                                           RETRYCHAN(rd->channel));
                        continue;
                }
 
-               /* Copy the already-read portion */
-               skb_push(pkt, BRCMF_FIRSTREAD);
-               memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD);
+               if (head_read) {
+                       skb_push(pkt, head_read);
+                       memcpy(pkt->data, bus->rxhdr, head_read);
+                       head_read = 0;
+               } else {
+                       memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
+                       rd_new.seq_num = rd->seq_num;
+                       if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
+                               rd->len = 0;
+                               brcmu_pkt_buf_free_skb(pkt);
+                       }
+                       bus->sdcnt.rx_readahead_cnt++;
+                       if (rd->len != roundup(rd_new.len, 16)) {
+                               brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n",
+                                         rd->len,
+                                         roundup(rd_new.len, 16) >> 4);
+                               rd->len = 0;
+                               brcmf_sdbrcm_rxfail(bus, true, true);
+                               brcmu_pkt_buf_free_skb(pkt);
+                               continue;
+                       }
+                       rd->len_nxtfrm = rd_new.len_nxtfrm;
+                       rd->channel = rd_new.channel;
+                       rd->dat_offset = rd_new.dat_offset;
+
+                       brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
+                                            BRCMF_DATA_ON()) &&
+                                          BRCMF_HDRS_ON(),
+                                          bus->rxhdr, SDPCM_HDRLEN,
+                                          "RxHdr:\n");
+
+                       if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
+                               brcmf_dbg(ERROR, "readahead on control packet %d?\n",
+                                         rd_new.seq_num);
+                               /* Force retry w/normal header read */
+                               rd->len = 0;
+                               brcmf_sdbrcm_rxfail(bus, false, true);
+                               brcmu_pkt_buf_free_skb(pkt);
+                               continue;
+                       }
+               }
 
                brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
-                                  pkt->data, len, "Rx Data:\n");
+                                  pkt->data, rd->len, "Rx Data:\n");
 
-deliver:
                /* Save superframe descriptor and allocate packet frame */
-               if (chan == SDPCM_GLOM_CHANNEL) {
+               if (rd->channel == SDPCM_GLOM_CHANNEL) {
                        if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
                                brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
-                                         len);
+                                         rd->len);
                                brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
-                                                  pkt->data, len,
+                                                  pkt->data, rd->len,
                                                   "Glom Data:\n");
-                               __skb_trim(pkt, len);
+                               __skb_trim(pkt, rd->len);
                                skb_pull(pkt, SDPCM_HDRLEN);
                                bus->glomd = pkt;
                        } else {
@@ -1996,12 +1755,23 @@ deliver:
                                          "descriptor!\n", __func__);
                                brcmf_sdbrcm_rxfail(bus, false, false);
                        }
+                       /* prepare the descriptor for the next read */
+                       rd->len = rd->len_nxtfrm << 4;
+                       rd->len_nxtfrm = 0;
+                       /* treat all packet as event if we don't know */
+                       rd->channel = SDPCM_EVENT_CHANNEL;
                        continue;
                }
 
                /* Fill in packet len and prio, deliver upward */
-               __skb_trim(pkt, len);
-               skb_pull(pkt, doff);
+               __skb_trim(pkt, rd->len);
+               skb_pull(pkt, rd->dat_offset);
+
+               /* prepare the descriptor for the next read */
+               rd->len = rd->len_nxtfrm << 4;
+               rd->len_nxtfrm = 0;
+               /* treat all packet as event if we don't know */
+               rd->channel = SDPCM_EVENT_CHANNEL;
 
                if (pkt->len == 0) {
                        brcmu_pkt_buf_free_skb(pkt);
@@ -2019,17 +1789,17 @@ deliver:
                brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
                down(&bus->sdsem);
        }
+
        rxcount = maxframes - rxleft;
        /* Message if we hit the limit */
        if (!rxleft)
-               brcmf_dbg(DATA, "hit rx limit of %d frames\n",
-                         maxframes);
+               brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
        else
                brcmf_dbg(DATA, "processed %d frames\n", rxcount);
        /* Back off rxseq if awaiting rtx, update rx_seq */
        if (bus->rxskip)
-               rxseq--;
-       bus->rx_seq = rxseq;
+               rd->seq_num--;
+       bus->rx_seq = rd->seq_num;
 
        return rxcount;
 }
@@ -2227,7 +1997,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                        if (ret != 0)
                                break;
                        if (intstatus & bus->hostintmask)
-                               bus->ipend = true;
+                               atomic_set(&bus->ipend, 1);
                }
        }
 
@@ -2235,8 +2005,8 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        if (bus->sdiodev->bus_if->drvr_up &&
            (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
            bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
-               bus->txoff = OFF;
-               brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+               bus->txoff = false;
+               brcmf_txflowblock(bus->sdiodev->dev, false);
        }
 
        return cnt;
@@ -2259,16 +2029,8 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
                bus->watchdog_tsk = NULL;
        }
 
-       if (bus->dpc_tsk && bus->dpc_tsk != current) {
-               send_sig(SIGTERM, bus->dpc_tsk, 1);
-               kthread_stop(bus->dpc_tsk);
-               bus->dpc_tsk = NULL;
-       }
-
        down(&bus->sdsem);
 
-       bus_wake(bus);
-
        /* Enable clock for device interrupts */
        brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
@@ -2327,7 +2089,7 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
        unsigned long flags;
 
        spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
-       if (!bus->sdiodev->irq_en && !bus->ipend) {
+       if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
                enable_irq(bus->sdiodev->irq);
                bus->sdiodev->irq_en = true;
        }
@@ -2339,21 +2101,69 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
 }
 #endif         /* CONFIG_BRCMFMAC_SDIO_OOB */
 
-static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
 {
-       u32 intstatus, newstatus = 0;
+       struct list_head *new_hd;
+       unsigned long flags;
+
+       if (in_interrupt())
+               new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+       else
+               new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+       if (new_hd == NULL)
+               return;
+
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       list_add_tail(new_hd, &bus->dpc_tsklst);
+       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
+static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
+{
+       u8 idx;
+       u32 addr;
+       unsigned long val;
+       int n, ret;
+
+       idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+       addr = bus->ci->c_inf[idx].base +
+              offsetof(struct sdpcmd_regs, intstatus);
+
+       ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+       bus->sdcnt.f1regdata++;
+       if (ret != 0)
+               val = 0;
+
+       val &= bus->hostintmask;
+       atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
+
+       /* Clear interrupts */
+       if (val) {
+               ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+               bus->sdcnt.f1regdata++;
+       }
+
+       if (ret) {
+               atomic_set(&bus->intstatus, 0);
+       } else if (val) {
+               for_each_set_bit(n, &val, 32)
+                       set_bit(n, (unsigned long *)&bus->intstatus.counter);
+       }
+
+       return ret;
+}
+
+static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+{
+       u32 newstatus = 0;
+       unsigned long intstatus;
        uint rxlimit = bus->rxbound;    /* Rx frames to read before resched */
        uint txlimit = bus->txbound;    /* Tx frames to send before resched */
        uint framecnt = 0;      /* Temporary counter of tx/rx frames */
-       bool rxdone = true;     /* Flag for no more read data */
-       bool resched = false;   /* Flag indicating resched wanted */
-       int err;
+       int err = 0, n;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       /* Start with leftover status bits */
-       intstatus = bus->intstatus;
-
        down(&bus->sdsem);
 
        /* If waiting for HTAVAIL, check status */
@@ -2399,39 +2209,22 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                                bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
                        }
                        bus->clkstate = CLK_AVAIL;
-               } else {
-                       goto clkwait;
                }
        }
 
-       bus_wake(bus);
-
        /* Make sure backplane clock is on */
        brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
-       if (bus->clkstate == CLK_PENDING)
-               goto clkwait;
 
        /* Pending interrupt indicates new device status */
-       if (bus->ipend) {
-               bus->ipend = false;
-               err = r_sdreg32(bus, &newstatus,
-                               offsetof(struct sdpcmd_regs, intstatus));
-               bus->sdcnt.f1regdata++;
-               if (err != 0)
-                       newstatus = 0;
-               newstatus &= bus->hostintmask;
-               bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
-               if (newstatus) {
-                       err = w_sdreg32(bus, newstatus,
-                                       offsetof(struct sdpcmd_regs,
-                                                intstatus));
-                       bus->sdcnt.f1regdata++;
-               }
+       if (atomic_read(&bus->ipend) > 0) {
+               atomic_set(&bus->ipend, 0);
+               sdio_claim_host(bus->sdiodev->func[1]);
+               err = brcmf_sdio_intr_rstatus(bus);
+               sdio_release_host(bus->sdiodev->func[1]);
        }
 
-       /* Merge new bits with previous */
-       intstatus |= newstatus;
-       bus->intstatus = 0;
+       /* Start with leftover status bits */
+       intstatus = atomic_xchg(&bus->intstatus, 0);
 
        /* Handle flow-control change: read new state in case our ack
         * crossed another change interrupt.  If change still set, assume
@@ -2445,8 +2238,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                err = r_sdreg32(bus, &newstatus,
                                offsetof(struct sdpcmd_regs, intstatus));
                bus->sdcnt.f1regdata += 2;
-               bus->fcstate =
-                   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+               atomic_set(&bus->fcstate,
+                          !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
                intstatus |= (newstatus & bus->hostintmask);
        }
 
@@ -2483,32 +2276,34 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                intstatus &= ~I_HMB_FRAME_IND;
 
        /* On frame indication, read available frames */
-       if (PKT_AVAILABLE()) {
-               framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
-               if (rxdone || bus->rxskip)
+       if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
+               framecnt = brcmf_sdio_readframes(bus, rxlimit);
+               if (!bus->rxpending)
                        intstatus &= ~I_HMB_FRAME_IND;
                rxlimit -= min(framecnt, rxlimit);
        }
 
        /* Keep still-pending events for next scheduling */
-       bus->intstatus = intstatus;
+       if (intstatus) {
+               for_each_set_bit(n, &intstatus, 32)
+                       set_bit(n, (unsigned long *)&bus->intstatus.counter);
+       }
 
-clkwait:
        brcmf_sdbrcm_clrintr(bus);
 
        if (data_ok(bus) && bus->ctrl_frame_stat &&
                (bus->clkstate == CLK_AVAIL)) {
-               int ret, i;
+               int i;
 
-               ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+               err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
                        SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
                        (u32) bus->ctrl_frame_len);
 
-               if (ret < 0) {
+               if (err < 0) {
                        /* On failure, abort the command and
                                terminate the frame */
                        brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
-                                 ret);
+                                 err);
                        bus->sdcnt.tx_sderrs++;
 
                        brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
@@ -2530,42 +2325,34 @@ clkwait:
                                        break;
                        }
 
-               }
-               if (ret == 0)
+               } else {
                        bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
-
-               brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
+               }
                bus->ctrl_frame_stat = false;
                brcmf_sdbrcm_wait_event_wakeup(bus);
        }
        /* Send queued frames (limit 1 if rx may still be pending) */
-       else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+       else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
                 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
                 && data_ok(bus)) {
-               framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax);
+               framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
+                                           txlimit;
                framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
                txlimit -= framecnt;
        }
 
-       /* Resched if events or tx frames are pending,
-                else await next interrupt */
-       /* On failed register access, all bets are off:
-                no resched or interrupts */
        if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
                brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
                bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-               bus->intstatus = 0;
-       } else if (bus->clkstate == CLK_PENDING) {
-               brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
-               resched = true;
-       } else if (bus->intstatus || bus->ipend ||
-               (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
-                && data_ok(bus)) || PKT_AVAILABLE()) {
-               resched = true;
+               atomic_set(&bus->intstatus, 0);
+       } else if (atomic_read(&bus->intstatus) ||
+                  atomic_read(&bus->ipend) > 0 ||
+                  (!atomic_read(&bus->fcstate) &&
+                   brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+                   data_ok(bus)) || PKT_AVAILABLE()) {
+               brcmf_sdbrcm_adddpctsk(bus);
        }
 
-       bus->dpc_sched = resched;
-
        /* If we're done for now, turn off clock request. */
        if ((bus->clkstate != CLK_PENDING)
            && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
@@ -2574,65 +2361,6 @@ clkwait:
        }
 
        up(&bus->sdsem);
-
-       return resched;
-}
-
-static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
-{
-       struct list_head *new_hd;
-       unsigned long flags;
-
-       if (in_interrupt())
-               new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
-       else
-               new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
-       if (new_hd == NULL)
-               return;
-
-       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-       list_add_tail(new_hd, &bus->dpc_tsklst);
-       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-}
-
-static int brcmf_sdbrcm_dpc_thread(void *data)
-{
-       struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
-       struct list_head *cur_hd, *tmp_hd;
-       unsigned long flags;
-
-       allow_signal(SIGTERM);
-       /* Run until signal received */
-       while (1) {
-               if (kthread_should_stop())
-                       break;
-
-               if (list_empty(&bus->dpc_tsklst))
-                       if (wait_for_completion_interruptible(&bus->dpc_wait))
-                               break;
-
-               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-               list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
-                       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
-                       if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
-                               /* after stopping the bus, exit thread */
-                               brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
-                               bus->dpc_tsk = NULL;
-                               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-                               break;
-                       }
-
-                       if (brcmf_sdbrcm_dpc(bus))
-                               brcmf_sdbrcm_adddpctsk(bus);
-
-                       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-                       list_del(cur_hd);
-                       kfree(cur_hd);
-               }
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-       }
-       return 0;
 }
 
 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2642,6 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       unsigned long flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2672,21 +2401,23 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        spin_unlock_bh(&bus->txqlock);
 
        if (pktq_len(&bus->txq) >= TXHI) {
-               bus->txoff = ON;
-               brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+               bus->txoff = true;
+               brcmf_txflowblock(bus->sdiodev->dev, true);
        }
 
 #ifdef DEBUG
        if (pktq_plen(&bus->txq, prec) > qcount[prec])
                qcount[prec] = pktq_plen(&bus->txq, prec);
 #endif
-       /* Schedule DPC if needed to send queued packet(s) */
-       if (!bus->dpc_sched) {
-               bus->dpc_sched = true;
-               if (bus->dpc_tsk) {
-                       brcmf_sdbrcm_adddpctsk(bus);
-                       complete(&bus->dpc_wait);
-               }
+
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       if (list_empty(&bus->dpc_tsklst)) {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+               brcmf_sdbrcm_adddpctsk(bus);
+               queue_work(bus->brcmf_wq, &bus->datawork);
+       } else {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
        }
 
        return ret;
@@ -2707,6 +2438,8 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
        else
                dsize = size;
 
+       sdio_claim_host(bus->sdiodev->func[1]);
+
        /* Set the backplane window to include the start address */
        bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
        if (bcmerror) {
@@ -2748,6 +2481,8 @@ xfer_done:
                brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
                          bus->sdiodev->sbwad);
 
+       sdio_release_host(bus->sdiodev->func[1]);
+
        return bcmerror;
 }
 
@@ -2882,6 +2617,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       unsigned long flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2918,8 +2654,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        /* Need to lock here to protect txseq and SDIO tx calls */
        down(&bus->sdsem);
 
-       bus_wake(bus);
-
        /* Make sure backplane clock is on */
        brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
@@ -2967,9 +2701,15 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
                } while (ret < 0 && retries++ < TXRETRIES);
        }
 
-       if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
+           list_empty(&bus->dpc_tsklst)) {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
                bus->activity = false;
                brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+       } else {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
        }
 
        up(&bus->sdsem);
@@ -3774,23 +3514,20 @@ void brcmf_sdbrcm_isr(void *arg)
        }
        /* Count the interrupt call */
        bus->sdcnt.intrcount++;
-       bus->ipend = true;
-
-       /* Shouldn't get this interrupt if we're sleeping? */
-       if (bus->sleeping) {
-               brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n");
-               return;
-       }
+       if (in_interrupt())
+               atomic_set(&bus->ipend, 1);
+       else
+               if (brcmf_sdio_intr_rstatus(bus)) {
+                       brcmf_dbg(ERROR, "failed backplane access\n");
+                       bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+               }
 
        /* Disable additional interrupts (is this needed now)? */
        if (!bus->intr)
                brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
 
-       bus->dpc_sched = true;
-       if (bus->dpc_tsk) {
-               brcmf_sdbrcm_adddpctsk(bus);
-               complete(&bus->dpc_wait);
-       }
+       brcmf_sdbrcm_adddpctsk(bus);
+       queue_work(bus->brcmf_wq, &bus->datawork);
 }
 
 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3798,13 +3535,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 #ifdef DEBUG
        struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
 #endif /* DEBUG */
+       unsigned long flags;
 
        brcmf_dbg(TIMER, "Enter\n");
 
-       /* Ignore the timer if simulating bus down */
-       if (bus->sleeping)
-               return false;
-
        down(&bus->sdsem);
 
        /* Poll period: check device if appropriate. */
@@ -3818,27 +3552,30 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
                if (!bus->intr ||
                    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
 
-                       if (!bus->dpc_sched) {
+                       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+                       if (list_empty(&bus->dpc_tsklst)) {
                                u8 devpend;
+                               spin_unlock_irqrestore(&bus->dpc_tl_lock,
+                                                      flags);
                                devpend = brcmf_sdio_regrb(bus->sdiodev,
                                                           SDIO_CCCR_INTx,
                                                           NULL);
                                intstatus =
                                    devpend & (INTR_STATUS_FUNC1 |
                                               INTR_STATUS_FUNC2);
+                       } else {
+                               spin_unlock_irqrestore(&bus->dpc_tl_lock,
+                                                      flags);
                        }
 
                        /* If there is something, make like the ISR and
                                 schedule the DPC */
                        if (intstatus) {
                                bus->sdcnt.pollcnt++;
-                               bus->ipend = true;
+                               atomic_set(&bus->ipend, 1);
 
-                               bus->dpc_sched = true;
-                               if (bus->dpc_tsk) {
-                                       brcmf_sdbrcm_adddpctsk(bus);
-                                       complete(&bus->dpc_wait);
-                               }
+                               brcmf_sdbrcm_adddpctsk(bus);
+                               queue_work(bus->brcmf_wq, &bus->datawork);
                        }
                }
 
@@ -3876,11 +3613,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 
        up(&bus->sdsem);
 
-       return bus->ipend;
+       return (atomic_read(&bus->ipend) > 0);
 }
 
 static bool brcmf_sdbrcm_chipmatch(u16 chipid)
 {
+       if (chipid == BCM43241_CHIP_ID)
+               return true;
        if (chipid == BCM4329_CHIP_ID)
                return true;
        if (chipid == BCM4330_CHIP_ID)
@@ -3890,6 +3629,26 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
        return false;
 }
 
+static void brcmf_sdio_dataworker(struct work_struct *work)
+{
+       struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
+                                             datawork);
+       struct list_head *cur_hd, *tmp_hd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+               brcmf_sdbrcm_dpc(bus);
+
+               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+               list_del(cur_hd);
+               kfree(cur_hd);
+       }
+       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
@@ -4022,7 +3781,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
                         SDIO_FUNC_ENABLE_1, NULL);
 
        bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-       bus->sleeping = false;
        bus->rxflow = false;
 
        /* Done with backplane-dependent accesses, can drop clock... */
@@ -4103,6 +3861,9 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
                /* De-register interrupt handler */
                brcmf_sdio_intr_unregister(bus->sdiodev);
 
+               cancel_work_sync(&bus->datawork);
+               destroy_workqueue(bus->brcmf_wq);
+
                if (bus->sdiodev->bus_if->drvr) {
                        brcmf_detach(bus->sdiodev->dev);
                        brcmf_sdbrcm_release_dongle(bus);
@@ -4142,8 +3903,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        bus->rxbound = BRCMF_RXBOUND;
        bus->txminmax = BRCMF_TXMINMAX;
        bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
-       bus->usebufpool = false;        /* Use bufpool if allocated,
-                                        else use locally malloced rxbuf */
 
        /* attempt to attach to the dongle */
        if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
@@ -4155,6 +3914,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        init_waitqueue_head(&bus->ctrl_wait);
        init_waitqueue_head(&bus->dcmd_resp_wait);
 
+       bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
+       if (bus->brcmf_wq == NULL) {
+               brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
+               goto fail;
+       }
+       INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+
        /* Set up the watchdog timer */
        init_timer(&bus->timer);
        bus->timer.data = (unsigned long)bus;
@@ -4172,15 +3938,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
                bus->watchdog_tsk = NULL;
        }
        /* Initialize DPC thread */
-       init_completion(&bus->dpc_wait);
        INIT_LIST_HEAD(&bus->dpc_tsklst);
        spin_lock_init(&bus->dpc_tl_lock);
-       bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
-                                  bus, "brcmf_dpc");
-       if (IS_ERR(bus->dpc_tsk)) {
-               pr_warn("brcmf_dpc thread failed to start\n");
-               bus->dpc_tsk = NULL;
-       }
 
        /* Assign bus interface call back */
        bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
index 58155e23d220fecc5f985be109901bbfce42070b..9434440bbc6536054b592588f9ef4d1bce73bd22 100644 (file)
@@ -377,6 +377,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
 
        /* Address of cores for new chips should be added here */
        switch (ci->chip) {
+       case BCM43241_CHIP_ID:
+               ci->c_inf[0].wrapbase = 0x18100000;
+               ci->c_inf[0].cib = 0x2a084411;
+               ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+               ci->c_inf[1].base = 0x18002000;
+               ci->c_inf[1].wrapbase = 0x18102000;
+               ci->c_inf[1].cib = 0x0e004211;
+               ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+               ci->c_inf[2].base = 0x18004000;
+               ci->c_inf[2].wrapbase = 0x18104000;
+               ci->c_inf[2].cib = 0x14080401;
+               ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+               ci->c_inf[3].base = 0x18003000;
+               ci->c_inf[3].wrapbase = 0x18103000;
+               ci->c_inf[3].cib = 0x07004211;
+               ci->ramsize = 0x90000;
+               break;
        case BCM4329_CHIP_ID:
                ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
                ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
index 29bf78d264e096d9b81cb2efd3ac527a34d80d61..0d30afd8c672affe82ec26cac18ba5e0138d6ac2 100644 (file)
@@ -174,6 +174,8 @@ extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
                             u8 data, int *ret);
 extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
                             u32 data, int *ret);
+extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+                                  void *data, bool write);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
index 58f89fa9c9f8a218ed29cfb93ad253c41471c648..a2b4b1e71017230b6d7ab262f52b45c4fa353bd7 100644 (file)
@@ -66,7 +66,9 @@
 #define BRCMF_USB_CBCTL_READ   1
 #define BRCMF_USB_MAX_PKT_SIZE 1600
 
+#define BRCMF_USB_43143_FW_NAME        "brcm/brcmfmac43143.bin"
 #define BRCMF_USB_43236_FW_NAME        "brcm/brcmfmac43236b.bin"
+#define BRCMF_USB_43242_FW_NAME        "brcm/brcmfmac43242a.bin"
 
 enum usbdev_suspend_state {
        USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
@@ -78,25 +80,13 @@ enum usbdev_suspend_state {
        USBOS_SUSPEND_STATE_SUSPENDED   /* Device suspended */
 };
 
-struct brcmf_usb_probe_info {
-       void *usbdev_info;
-       struct usb_device *usb; /* USB device pointer from OS */
-       uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
-       int intr_size; /* Size of interrupt message */
-       int interval;  /* Interrupt polling interval */
-       int vid;
-       int pid;
-       enum usb_device_speed device_speed;
-       enum usbdev_suspend_state suspend_state;
-       struct usb_interface *intf;
-};
-static struct brcmf_usb_probe_info usbdev_probe_info;
-
 struct brcmf_usb_image {
-       void *data;
-       u32 len;
+       struct list_head list;
+       s8 *fwname;
+       u8 *image;
+       int image_len;
 };
-static struct brcmf_usb_image g_image = { NULL, 0 };
+static struct list_head fw_image_list;
 
 struct intr_transfer_buf {
        u32 notification;
@@ -117,9 +107,8 @@ struct brcmf_usbdev_info {
        int rx_low_watermark;
        int tx_low_watermark;
        int tx_high_watermark;
-       bool txoff;
-       bool rxoff;
-       bool txoverride;
+       int tx_freecount;
+       bool tx_flowblock;
 
        struct brcmf_usbreq *tx_reqs;
        struct brcmf_usbreq *rx_reqs;
@@ -133,7 +122,6 @@ struct brcmf_usbdev_info {
 
        struct usb_device *usbdev;
        struct device *dev;
-       enum usb_device_speed  device_speed;
 
        int ctl_in_pipe, ctl_out_pipe;
        struct urb *ctl_urb; /* URB for control endpoint */
@@ -146,16 +134,11 @@ struct brcmf_usbdev_info {
        wait_queue_head_t ctrl_wait;
        ulong ctl_op;
 
-       bool rxctl_deferrespok;
-
        struct urb *bulk_urb; /* used for FW download */
        struct urb *intr_urb; /* URB for interrupt endpoint */
        int intr_size;          /* Size of interrupt message */
        int interval;           /* Interrupt polling interval */
        struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
-
-       struct brcmf_usb_probe_info probe_info;
-
 };
 
 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -177,48 +160,17 @@ static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
        return brcmf_usb_get_buspub(dev)->devinfo;
 }
 
-#if 0
-static void
-brcmf_usb_txflowcontrol(struct brcmf_usbdev_info *devinfo, bool onoff)
+static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
 {
-       dhd_txflowcontrol(devinfo->bus_pub.netdev, 0, onoff);
+       return wait_event_timeout(devinfo->ioctl_resp_wait,
+                                 devinfo->ctl_completed,
+                                 msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
 }
-#endif
 
-static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo,
-        uint *condition, bool *pending)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       int timeout = IOCTL_RESP_TIMEOUT;
-
-       /* Convert timeout in millsecond to jiffies */
-       timeout = msecs_to_jiffies(timeout);
-       /* Wait until control frame is available */
-       add_wait_queue(&devinfo->ioctl_resp_wait, &wait);
-       set_current_state(TASK_INTERRUPTIBLE);
-
-       smp_mb();
-       while (!(*condition) && (!signal_pending(current) && timeout)) {
-               timeout = schedule_timeout(timeout);
-               /* Wait until control frame is available */
-               smp_mb();
-       }
-
-       if (signal_pending(current))
-               *pending = true;
-
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&devinfo->ioctl_resp_wait, &wait);
-
-       return timeout;
-}
-
-static int brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
+static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
 {
        if (waitqueue_active(&devinfo->ioctl_resp_wait))
-               wake_up_interruptible(&devinfo->ioctl_resp_wait);
-
-       return 0;
+               wake_up(&devinfo->ioctl_resp_wait);
 }
 
 static void
@@ -324,17 +276,9 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
        devinfo->ctl_read.wLength = cpu_to_le16p(&size);
        devinfo->ctl_urb->transfer_buffer_length = size;
 
-       if (devinfo->rxctl_deferrespok) {
-               /* BMAC model */
-               devinfo->ctl_read.bRequestType = USB_DIR_IN
-                       | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
-               devinfo->ctl_read.bRequest = DL_DEFER_RESP_OK;
-       } else {
-               /* full dongle model */
-               devinfo->ctl_read.bRequestType = USB_DIR_IN
-                       | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
-               devinfo->ctl_read.bRequest = 1;
-       }
+       devinfo->ctl_read.bRequestType = USB_DIR_IN
+               | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+       devinfo->ctl_read.bRequest = 1;
 
        usb_fill_control_urb(devinfo->ctl_urb,
                devinfo->usbdev,
@@ -355,7 +299,6 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
 {
        int err = 0;
        int timeout = 0;
-       bool pending;
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
        if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -366,15 +309,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
        if (test_and_set_bit(0, &devinfo->ctl_op))
                return -EIO;
 
+       devinfo->ctl_completed = false;
        err = brcmf_usb_send_ctl(devinfo, buf, len);
        if (err) {
                brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+               clear_bit(0, &devinfo->ctl_op);
                return err;
        }
-
-       devinfo->ctl_completed = false;
-       timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
-                                           &pending);
+       timeout = brcmf_usb_ioctl_resp_wait(devinfo);
        clear_bit(0, &devinfo->ctl_op);
        if (!timeout) {
                brcmf_dbg(ERROR, "Txctl wait timed out\n");
@@ -387,7 +329,6 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
 {
        int err = 0;
        int timeout = 0;
-       bool pending;
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
        if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -397,14 +338,14 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
        if (test_and_set_bit(0, &devinfo->ctl_op))
                return -EIO;
 
+       devinfo->ctl_completed = false;
        err = brcmf_usb_recv_ctl(devinfo, buf, len);
        if (err) {
                brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+               clear_bit(0, &devinfo->ctl_op);
                return err;
        }
-       devinfo->ctl_completed = false;
-       timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
-                                           &pending);
+       timeout = brcmf_usb_ioctl_resp_wait(devinfo);
        err = devinfo->ctl_urb_status;
        clear_bit(0, &devinfo->ctl_op);
        if (!timeout) {
@@ -418,7 +359,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
 }
 
 static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
-                                         struct list_head *q)
+                                         struct list_head *q, int *counter)
 {
        unsigned long flags;
        struct brcmf_usbreq  *req;
@@ -429,17 +370,22 @@ static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
        }
        req = list_entry(q->next, struct brcmf_usbreq, list);
        list_del_init(q->next);
+       if (counter)
+               (*counter)--;
        spin_unlock_irqrestore(&devinfo->qlock, flags);
        return req;
 
 }
 
 static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
-                         struct list_head *q, struct brcmf_usbreq *req)
+                         struct list_head *q, struct brcmf_usbreq *req,
+                         int *counter)
 {
        unsigned long flags;
        spin_lock_irqsave(&devinfo->qlock, flags);
        list_add_tail(&req->list, q);
+       if (counter)
+               (*counter)++;
        spin_unlock_irqrestore(&devinfo->qlock, flags);
 }
 
@@ -519,10 +465,16 @@ static void brcmf_usb_tx_complete(struct urb *urb)
        else
                devinfo->bus_pub.bus->dstats.tx_errors++;
 
+       brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
+
        brcmu_pkt_buf_free_skb(req->skb);
        req->skb = NULL;
-       brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
-
+       brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
+       if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
+               devinfo->tx_flowblock) {
+               brcmf_txflowblock(devinfo->dev, false);
+               devinfo->tx_flowblock = false;
+       }
 }
 
 static void brcmf_usb_rx_complete(struct urb *urb)
@@ -541,7 +493,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
        } else {
                devinfo->bus_pub.bus->dstats.rx_errors++;
                brcmu_pkt_buf_free_skb(skb);
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
                return;
        }
 
@@ -550,15 +502,13 @@ static void brcmf_usb_rx_complete(struct urb *urb)
                if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
                        brcmf_dbg(ERROR, "rx protocol error\n");
                        brcmu_pkt_buf_free_skb(skb);
-                       brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
                        devinfo->bus_pub.bus->dstats.rx_errors++;
-               } else {
+               } else
                        brcmf_rx_packet(devinfo->dev, ifidx, skb);
-                       brcmf_usb_rx_refill(devinfo, req);
-               }
+               brcmf_usb_rx_refill(devinfo, req);
        } else {
                brcmu_pkt_buf_free_skb(skb);
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
        }
        return;
 
@@ -575,7 +525,7 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
 
        skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
        if (!skb) {
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
                return;
        }
        req->skb = skb;
@@ -584,14 +534,14 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
                          skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
                          req);
        req->devinfo = devinfo;
-       brcmf_usb_enq(devinfo, &devinfo->rx_postq, req);
+       brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
 
        ret = usb_submit_urb(req->urb, GFP_ATOMIC);
        if (ret) {
                brcmf_usb_del_fromq(devinfo, req);
                brcmu_pkt_buf_free_skb(req->skb);
                req->skb = NULL;
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
        }
        return;
 }
@@ -604,7 +554,7 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
                brcmf_dbg(ERROR, "bus is not up\n");
                return;
        }
-       while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq)) != NULL)
+       while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
                brcmf_usb_rx_refill(devinfo, req);
 }
 
@@ -682,7 +632,8 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
                return -EIO;
        }
 
-       req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq);
+       req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
+                                       &devinfo->tx_freecount);
        if (!req) {
                brcmu_pkt_buf_free_skb(skb);
                brcmf_dbg(ERROR, "no req to send\n");
@@ -694,14 +645,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
        usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
                          skb->data, skb->len, brcmf_usb_tx_complete, req);
        req->urb->transfer_flags |= URB_ZERO_PACKET;
-       brcmf_usb_enq(devinfo, &devinfo->tx_postq, req);
+       brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
        ret = usb_submit_urb(req->urb, GFP_ATOMIC);
        if (ret) {
                brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
                brcmf_usb_del_fromq(devinfo, req);
                brcmu_pkt_buf_free_skb(req->skb);
                req->skb = NULL;
-               brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
+                                               &devinfo->tx_freecount);
+       } else {
+               if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
+                       !devinfo->tx_flowblock) {
+                       brcmf_txflowblock(dev, true);
+                       devinfo->tx_flowblock = true;
+               }
        }
 
        return ret;
@@ -1112,10 +1070,14 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
 static bool brcmf_usb_chip_support(int chipid, int chiprev)
 {
        switch(chipid) {
+       case 43143:
+               return true;
        case 43235:
        case 43236:
        case 43238:
                return (chiprev == 3);
+       case 43242:
+               return true;
        default:
                break;
        }
@@ -1154,17 +1116,10 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
 }
 
 
-static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
+static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
 {
-       struct brcmf_usbdev_info *devinfo =
-               (struct brcmf_usbdev_info *)bus_pub;
-
        brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
 
-       /* store the image globally */
-       g_image.data = devinfo->image;
-       g_image.len = devinfo->image_len;
-
        /* free the URBS */
        brcmf_usb_free_q(&devinfo->rx_freeq, false);
        brcmf_usb_free_q(&devinfo->tx_freeq, false);
@@ -1175,7 +1130,6 @@ static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
 
        kfree(devinfo->tx_reqs);
        kfree(devinfo->rx_reqs);
-       kfree(devinfo);
 }
 
 #define TRX_MAGIC       0x30524448      /* "HDR0" */
@@ -1217,19 +1171,34 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
 {
        s8 *fwname;
        const struct firmware *fw;
+       struct brcmf_usb_image *fw_image;
        int err;
 
-       devinfo->image = g_image.data;
-       devinfo->image_len = g_image.len;
-
-       /*
-        * if we have an image we can leave here.
-        */
-       if (devinfo->image)
-               return 0;
-
-       fwname = BRCMF_USB_43236_FW_NAME;
+       switch (devinfo->bus_pub.devid) {
+       case 43143:
+               fwname = BRCMF_USB_43143_FW_NAME;
+               break;
+       case 43235:
+       case 43236:
+       case 43238:
+               fwname = BRCMF_USB_43236_FW_NAME;
+               break;
+       case 43242:
+               fwname = BRCMF_USB_43242_FW_NAME;
+               break;
+       default:
+               return -EINVAL;
+               break;
+       }
 
+       list_for_each_entry(fw_image, &fw_image_list, list) {
+               if (fw_image->fwname == fwname) {
+                       devinfo->image = fw_image->image;
+                       devinfo->image_len = fw_image->image_len;
+                       return 0;
+               }
+       }
+       /* fw image not yet loaded. Load it now and add to list */
        err = request_firmware(&fw, fwname, devinfo->dev);
        if (!fw) {
                brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
@@ -1240,27 +1209,32 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
                return -EINVAL;
        }
 
-       devinfo->image = vmalloc(fw->size); /* plus nvram */
-       if (!devinfo->image)
+       fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
+       if (!fw_image)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&fw_image->list);
+       list_add_tail(&fw_image->list, &fw_image_list);
+       fw_image->fwname = fwname;
+       fw_image->image = vmalloc(fw->size);
+       if (!fw_image->image)
                return -ENOMEM;
 
-       memcpy(devinfo->image, fw->data, fw->size);
-       devinfo->image_len = fw->size;
+       memcpy(fw_image->image, fw->data, fw->size);
+       fw_image->image_len = fw->size;
 
        release_firmware(fw);
+
+       devinfo->image = fw_image->image;
+       devinfo->image_len = fw_image->image_len;
+
        return 0;
 }
 
 
 static
-struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
+struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
+                                     int nrxq, int ntxq)
 {
-       struct brcmf_usbdev_info *devinfo;
-
-       devinfo = kzalloc(sizeof(struct brcmf_usbdev_info), GFP_ATOMIC);
-       if (devinfo == NULL)
-               return NULL;
-
        devinfo->bus_pub.nrxq = nrxq;
        devinfo->rx_low_watermark = nrxq / 2;
        devinfo->bus_pub.devinfo = devinfo;
@@ -1269,18 +1243,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
        /* flow control when too many tx urbs posted */
        devinfo->tx_low_watermark = ntxq / 4;
        devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
-       devinfo->dev = dev;
-       devinfo->usbdev = usbdev_probe_info.usb;
-       devinfo->tx_pipe = usbdev_probe_info.tx_pipe;
-       devinfo->rx_pipe = usbdev_probe_info.rx_pipe;
-       devinfo->rx_pipe2 = usbdev_probe_info.rx_pipe2;
-       devinfo->intr_pipe = usbdev_probe_info.intr_pipe;
-
-       devinfo->interval = usbdev_probe_info.interval;
-       devinfo->intr_size = usbdev_probe_info.intr_size;
-
-       memcpy(&devinfo->probe_info, &usbdev_probe_info,
-               sizeof(struct brcmf_usb_probe_info));
        devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
 
        /* Initialize other structure content */
@@ -1295,6 +1257,8 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
        INIT_LIST_HEAD(&devinfo->tx_freeq);
        INIT_LIST_HEAD(&devinfo->tx_postq);
 
+       devinfo->tx_flowblock = false;
+
        devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
        if (!devinfo->rx_reqs)
                goto error;
@@ -1302,6 +1266,7 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
        devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
        if (!devinfo->tx_reqs)
                goto error;
+       devinfo->tx_freecount = ntxq;
 
        devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!devinfo->intr_urb) {
@@ -1313,8 +1278,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
                brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
                goto error;
        }
-       devinfo->rxctl_deferrespok = 0;
-
        devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!devinfo->bulk_urb) {
                brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
@@ -1336,23 +1299,21 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
 
 error:
        brcmf_dbg(ERROR, "failed!\n");
-       brcmf_usb_detach(&devinfo->bus_pub);
+       brcmf_usb_detach(devinfo);
        return NULL;
 }
 
-static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
-                               u32 bustype, u32 hdrlen)
+static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
+                             const char *desc, u32 bustype, u32 hdrlen)
 {
        struct brcmf_bus *bus = NULL;
        struct brcmf_usbdev *bus_pub = NULL;
        int ret;
+       struct device *dev = devinfo->dev;
 
-
-       bus_pub = brcmf_usb_attach(BRCMF_USB_NRXQ, BRCMF_USB_NTXQ, dev);
-       if (!bus_pub) {
-               ret = -ENODEV;
-               goto fail;
-       }
+       bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
+       if (!bus_pub)
+               return -ENODEV;
 
        bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
        if (!bus) {
@@ -1387,23 +1348,21 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
        return 0;
 fail:
        /* Release resources in reverse order */
-       if (bus_pub)
-               brcmf_usb_detach(bus_pub);
        kfree(bus);
+       brcmf_usb_detach(devinfo);
        return ret;
 }
 
 static void
-brcmf_usb_disconnect_cb(struct brcmf_usbdev *bus_pub)
+brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
 {
-       if (!bus_pub)
+       if (!devinfo)
                return;
-       brcmf_dbg(TRACE, "enter: bus_pub %p\n", bus_pub);
-
-       brcmf_detach(bus_pub->devinfo->dev);
-       kfree(bus_pub->bus);
-       brcmf_usb_detach(bus_pub);
+       brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
 
+       brcmf_detach(devinfo->dev);
+       kfree(devinfo->bus_pub.bus);
+       brcmf_usb_detach(devinfo);
 }
 
 static int
@@ -1415,18 +1374,18 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        struct usb_device *usb = interface_to_usbdev(intf);
        int num_of_eps;
        u8 endpoint_num;
+       struct brcmf_usbdev_info *devinfo;
 
        brcmf_dbg(TRACE, "enter\n");
 
-       usbdev_probe_info.usb = usb;
-       usbdev_probe_info.intf = intf;
+       devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
+       if (devinfo == NULL)
+               return -ENOMEM;
 
-       if (id != NULL) {
-               usbdev_probe_info.vid = id->idVendor;
-               usbdev_probe_info.pid = id->idProduct;
-       }
+       devinfo->usbdev = usb;
+       devinfo->dev = &usb->dev;
 
-       usb_set_intfdata(intf, &usbdev_probe_info);
+       usb_set_intfdata(intf, devinfo);
 
        /* Check that the device supports only one configuration */
        if (usb->descriptor.bNumConfigurations != 1) {
@@ -1475,11 +1434,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        }
 
        endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-       usbdev_probe_info.intr_pipe = usb_rcvintpipe(usb, endpoint_num);
+       devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
 
-       usbdev_probe_info.rx_pipe = 0;
-       usbdev_probe_info.rx_pipe2 = 0;
-       usbdev_probe_info.tx_pipe = 0;
+       devinfo->rx_pipe = 0;
+       devinfo->rx_pipe2 = 0;
+       devinfo->tx_pipe = 0;
        num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
 
        /* Check data endpoints and get pipes */
@@ -1496,35 +1455,33 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
                               USB_ENDPOINT_NUMBER_MASK;
                if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
                        == USB_DIR_IN) {
-                       if (!usbdev_probe_info.rx_pipe) {
-                               usbdev_probe_info.rx_pipe =
+                       if (!devinfo->rx_pipe) {
+                               devinfo->rx_pipe =
                                        usb_rcvbulkpipe(usb, endpoint_num);
                        } else {
-                               usbdev_probe_info.rx_pipe2 =
+                               devinfo->rx_pipe2 =
                                        usb_rcvbulkpipe(usb, endpoint_num);
                        }
                } else {
-                       usbdev_probe_info.tx_pipe =
-                                       usb_sndbulkpipe(usb, endpoint_num);
+                       devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
                }
        }
 
        /* Allocate interrupt URB and data buffer */
        /* RNDIS says 8-byte intr, our old drivers used 4-byte */
        if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
-               usbdev_probe_info.intr_size = 8;
+               devinfo->intr_size = 8;
        else
-               usbdev_probe_info.intr_size = 4;
+               devinfo->intr_size = 4;
 
-       usbdev_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+       devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
 
-       usbdev_probe_info.device_speed = usb->speed;
        if (usb->speed == USB_SPEED_HIGH)
                brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
        else
                brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
 
-       ret = brcmf_usb_probe_cb(&usb->dev, "", USB_BUS, 0);
+       ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
        if (ret)
                goto fail;
 
@@ -1533,6 +1490,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
 fail:
        brcmf_dbg(ERROR, "failed with errno %d\n", ret);
+       kfree(devinfo);
        usb_set_intfdata(intf, NULL);
        return ret;
 
@@ -1541,11 +1499,12 @@ fail:
 static void
 brcmf_usb_disconnect(struct usb_interface *intf)
 {
-       struct usb_device *usb = interface_to_usbdev(intf);
+       struct brcmf_usbdev_info *devinfo;
 
        brcmf_dbg(TRACE, "enter\n");
-       brcmf_usb_disconnect_cb(brcmf_usb_get_buspub(&usb->dev));
-       usb_set_intfdata(intf, NULL);
+       devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
+       brcmf_usb_disconnect_cb(devinfo);
+       kfree(devinfo);
 }
 
 /*
@@ -1577,17 +1536,23 @@ static int brcmf_usb_resume(struct usb_interface *intf)
 }
 
 #define BRCMF_USB_VENDOR_ID_BROADCOM   0x0a5c
+#define BRCMF_USB_DEVICE_ID_43143      0xbd1e
 #define BRCMF_USB_DEVICE_ID_43236      0xbd17
+#define BRCMF_USB_DEVICE_ID_43242      0xbd1f
 #define BRCMF_USB_DEVICE_ID_BCMFW      0x0bdc
 
 static struct usb_device_id brcmf_usb_devid_table[] = {
+       { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
+       { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
        /* special entry for device with firmware loaded and running */
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
+MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
 MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
 
 /* TODO: suspend and resume entries */
 static struct usb_driver brcmf_usbdrvr = {
@@ -1601,15 +1566,25 @@ static struct usb_driver brcmf_usbdrvr = {
        .disable_hub_initiated_lpm = 1,
 };
 
+static void brcmf_release_fw(struct list_head *q)
+{
+       struct brcmf_usb_image *fw_image, *next;
+
+       list_for_each_entry_safe(fw_image, next, q, list) {
+               vfree(fw_image->image);
+               list_del_init(&fw_image->list);
+       }
+}
+
+
 void brcmf_usb_exit(void)
 {
        usb_deregister(&brcmf_usbdrvr);
-       vfree(g_image.data);
-       g_image.data = NULL;
-       g_image.len = 0;
+       brcmf_release_fw(&fw_image_list);
 }
 
 void brcmf_usb_init(void)
 {
+       INIT_LIST_HEAD(&fw_image_list);
        usb_register(&brcmf_usbdrvr);
 }
index 50b5553b6964b95dffecf40f4d28fdb91796efc8..c1abaa6db59ec97fc9da4a0419ba81ea922cfb42 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ieee80211.h>
 #include <linux/uaccess.h>
 #include <net/cfg80211.h>
+#include <net/netlink.h>
 
 #include <brcmu_utils.h>
 #include <defs.h>
 #include "dhd.h"
 #include "wl_cfg80211.h"
 
+#define BRCMF_SCAN_IE_LEN_MAX          2048
+#define BRCMF_PNO_VERSION              2
+#define BRCMF_PNO_TIME                 30
+#define BRCMF_PNO_REPEAT               4
+#define BRCMF_PNO_FREQ_EXPO_MAX                3
+#define BRCMF_PNO_MAX_PFN_COUNT                16
+#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
+#define BRCMF_PNO_HIDDEN_BIT           2
+#define BRCMF_PNO_WPA_AUTH_ANY         0xFFFFFFFF
+#define BRCMF_PNO_SCAN_COMPLETE                1
+#define BRCMF_PNO_SCAN_INCOMPLETE      0
+
+#define TLV_LEN_OFF                    1       /* length offset */
+#define TLV_HDR_LEN                    2       /* header length */
+#define TLV_BODY_OFF                   2       /* body offset */
+#define TLV_OUI_LEN                    3       /* oui id length */
+#define WPA_OUI                                "\x00\x50\xF2"  /* WPA OUI */
+#define WPA_OUI_TYPE                   1
+#define RSN_OUI                                "\x00\x0F\xAC"  /* RSN OUI */
+#define        WME_OUI_TYPE                    2
+
+#define VS_IE_FIXED_HDR_LEN            6
+#define WPA_IE_VERSION_LEN             2
+#define WPA_IE_MIN_OUI_LEN             4
+#define WPA_IE_SUITE_COUNT_LEN         2
+
+#define WPA_CIPHER_NONE                        0       /* None */
+#define WPA_CIPHER_WEP_40              1       /* WEP (40-bit) */
+#define WPA_CIPHER_TKIP                        2       /* TKIP: default for WPA */
+#define WPA_CIPHER_AES_CCM             4       /* AES (CCM) */
+#define WPA_CIPHER_WEP_104             5       /* WEP (104-bit) */
+
+#define RSN_AKM_NONE                   0       /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED            1       /* Over 802.1x */
+#define RSN_AKM_PSK                    2       /* Pre-shared Key */
+#define RSN_CAP_LEN                    2       /* Length of RSN capabilities */
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK   0x000C
+
+#define VNDR_IE_CMD_LEN                        4       /* length of the set command
+                                                * string :"add", "del" (+ NUL)
+                                                */
+#define VNDR_IE_COUNT_OFFSET           4
+#define VNDR_IE_PKTFLAG_OFFSET         8
+#define VNDR_IE_VSIE_OFFSET            12
+#define VNDR_IE_HDR_SIZE               12
+#define VNDR_IE_BEACON_FLAG            0x1
+#define VNDR_IE_PRBRSP_FLAG            0x2
+#define MAX_VNDR_IE_NUMBER             5
+
+#define        DOT11_MGMT_HDR_LEN              24      /* d11 management header len */
+#define        DOT11_BCN_PRB_FIXED_LEN         12      /* beacon/probe fixed length */
+
 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
        (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
 
@@ -42,33 +95,12 @@ static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
 
 static u32 brcmf_dbg_level = WL_DBG_ERR;
 
-static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
-{
-       dev->driver_data = data;
-}
-
-static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
-{
-       void *data = NULL;
-
-       if (dev)
-               data = dev->driver_data;
-       return data;
-}
-
-static
-struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev)
-{
-       struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev);
-       return ci->cfg_priv;
-}
-
 static bool check_sys_up(struct wiphy *wiphy)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       if (!test_bit(WL_STATUS_READY, &cfg->status)) {
                WL_INFO("device is not ready : status (%d)\n",
-                       (int)cfg_priv->status);
+                       (int)cfg->status);
                return false;
        }
        return true;
@@ -256,6 +288,25 @@ struct brcmf_tlv {
        u8 data[1];
 };
 
+/* Vendor specific ie. id = 221, oui and type defines exact ie */
+struct brcmf_vs_tlv {
+       u8 id;
+       u8 len;
+       u8 oui[3];
+       u8 oui_type;
+};
+
+struct parsed_vndr_ie_info {
+       u8 *ie_ptr;
+       u32 ie_len;     /* total length including id & length field */
+       struct brcmf_vs_tlv vndrie;
+};
+
+struct parsed_vndr_ies {
+       u32 count;
+       struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
 /* Quarter dBm units to mW
  * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
  * Table is offset so the last entry is largest mW value that fits in
@@ -353,6 +404,44 @@ brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
        return err;
 }
 
+static s32
+brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
+                             void *param, s32 paramlen,
+                             void *buf, s32 buflen, s32 bssidx)
+{
+       s32 err = -ENOMEM;
+       u32 len;
+
+       len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
+                                    buf, buflen, bssidx);
+       BUG_ON(!len);
+       if (len > 0)
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
+       if (err)
+               WL_ERR("error (%d)\n", err);
+
+       return err;
+}
+
+static s32
+brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
+                             void *param, s32 paramlen,
+                             void *buf, s32 buflen, s32 bssidx)
+{
+       s32 err = -ENOMEM;
+       u32 len;
+
+       len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
+                                    buf, buflen, bssidx);
+       BUG_ON(!len);
+       if (len > 0)
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
+       if (err)
+               WL_ERR("error (%d)\n", err);
+
+       return err;
+}
+
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
                                 struct brcmf_wsec_key_le *key_le)
 {
@@ -367,16 +456,22 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
        memcpy(key_le->ea, key->ea, sizeof(key->ea));
 }
 
-static int send_key_to_dongle(struct net_device *ndev,
-                             struct brcmf_wsec_key *key)
+static int
+send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
+                  struct net_device *ndev, struct brcmf_wsec_key *key)
 {
        int err;
        struct brcmf_wsec_key_le key_le;
 
        convert_key_from_CPU(key, &key_le);
-       err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le));
+
+       err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
+                                            sizeof(key_le),
+                                            cfg->extra_buf,
+                                            WL_EXTRA_BUF_MAX, bssidx);
+
        if (err)
-               WL_ERR("WLC_SET_KEY error (%d)\n", err);
+               WL_ERR("wsec_key error (%d)\n", err);
        return err;
 }
 
@@ -385,14 +480,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                         enum nl80211_iftype type, u32 *flags,
                         struct vif_params *params)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct wireless_dev *wdev;
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        s32 infra = 0;
+       s32 ap = 0;
        s32 err = 0;
 
-       WL_TRACE("Enter\n");
-       if (!check_sys_up(wiphy))
-               return -EIO;
+       WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type);
 
        switch (type) {
        case NL80211_IFTYPE_MONITOR:
@@ -401,29 +494,44 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                       type);
                return -EOPNOTSUPP;
        case NL80211_IFTYPE_ADHOC:
-               cfg_priv->conf->mode = WL_MODE_IBSS;
+               cfg->conf->mode = WL_MODE_IBSS;
                infra = 0;
                break;
        case NL80211_IFTYPE_STATION:
-               cfg_priv->conf->mode = WL_MODE_BSS;
+               cfg->conf->mode = WL_MODE_BSS;
                infra = 1;
                break;
+       case NL80211_IFTYPE_AP:
+               cfg->conf->mode = WL_MODE_AP;
+               ap = 1;
+               break;
        default:
                err = -EINVAL;
                goto done;
        }
 
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
-       if (err) {
-               WL_ERR("WLC_SET_INFRA error (%d)\n", err);
-               err = -EAGAIN;
+       if (ap) {
+               set_bit(WL_STATUS_AP_CREATING, &cfg->status);
+               if (!cfg->ap_info)
+                       cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
+                                              GFP_KERNEL);
+               if (!cfg->ap_info) {
+                       err = -ENOMEM;
+                       goto done;
+               }
+               WL_INFO("IF Type = AP\n");
        } else {
-               wdev = ndev->ieee80211_ptr;
-               wdev->iftype = type;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+               if (err) {
+                       WL_ERR("WLC_SET_INFRA error (%d)\n", err);
+                       err = -EAGAIN;
+                       goto done;
+               }
+               WL_INFO("IF Type = %s\n",
+                       (cfg->conf->mode == WL_MODE_IBSS) ?
+                       "Adhoc" : "Infra");
        }
-
-       WL_INFO("IF Type = %s\n",
-               (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
+       ndev->ieee80211_ptr->iftype = type;
 
 done:
        WL_TRACE("Exit\n");
@@ -474,12 +582,55 @@ brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
        return err;
 }
 
+static s32
+brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
+                           s32 bssidx)
+{
+       s8 buf[BRCMF_DCMD_SMLEN];
+       __le32 val_le;
+
+       val_le = cpu_to_le32(val);
+
+       return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
+                                            sizeof(val_le), buf, sizeof(buf),
+                                            bssidx);
+}
+
+static s32
+brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
+                           s32 bssidx)
+{
+       s8 buf[BRCMF_DCMD_SMLEN];
+       s32 err;
+       __le32 val_le;
+
+       memset(buf, 0, sizeof(buf));
+       err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
+                                           sizeof(buf), bssidx);
+       if (err == 0) {
+               memcpy(&val_le, buf, sizeof(val_le));
+               *val = le32_to_cpu(val_le);
+       }
+       return err;
+}
+
+
+/*
+ * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
+ * should return the ndev matching bssidx.
+ */
+static s32
+brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
+{
+       return 0;
+}
+
 static void brcmf_set_mpc(struct net_device *ndev, int mpc)
 {
        s32 err = 0;
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       if (test_bit(WL_STATUS_READY, &cfg->status)) {
                err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
                if (err) {
                        WL_ERR("fail to set mpc\n");
@@ -489,8 +640,8 @@ static void brcmf_set_mpc(struct net_device *ndev, int mpc)
        }
 }
 
-static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
-                         struct brcmf_ssid *ssid)
+static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
+                            struct brcmf_ssid *ssid)
 {
        memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
        params_le->bss_type = DOT11_BSSTYPE_ANY;
@@ -546,7 +697,7 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
                return -ENOMEM;
        BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
 
-       wl_iscan_prep(&params->params_le, ssid);
+       brcmf_iscan_prep(&params->params_le, ssid);
 
        params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
        params->action = cpu_to_le16(action);
@@ -565,10 +716,10 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
        return err;
 }
 
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        struct brcmf_ssid ssid;
        __le32 passive_scan;
        s32 err = 0;
@@ -578,19 +729,19 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
 
        iscan->state = WL_ISCAN_STATE_SCANING;
 
-       passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
-       err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
+       passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+       err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
                        &passive_scan, sizeof(passive_scan));
        if (err) {
                WL_ERR("error (%d)\n", err);
                return err;
        }
        brcmf_set_mpc(ndev, 0);
-       cfg_priv->iscan_kickstart = true;
+       cfg->iscan_kickstart = true;
        err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
        if (err) {
                brcmf_set_mpc(ndev, 1);
-               cfg_priv->iscan_kickstart = false;
+               cfg->iscan_kickstart = false;
                return err;
        }
        mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -599,31 +750,31 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
 }
 
 static s32
-__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
-                  struct cfg80211_scan_request *request,
-                  struct cfg80211_ssid *this_ssid)
+brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
+                    struct cfg80211_scan_request *request,
+                    struct cfg80211_ssid *this_ssid)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
        struct cfg80211_ssid *ssids;
-       struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
+       struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
        __le32 passive_scan;
        bool iscan_req;
        bool spec_scan;
        s32 err = 0;
        u32 SSID_len;
 
-       if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
-               WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
+       if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scanning already : status (%lu)\n", cfg->status);
                return -EAGAIN;
        }
-       if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) {
+       if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
                WL_ERR("Scanning being aborted : status (%lu)\n",
-                      cfg_priv->status);
+                      cfg->status);
                return -EAGAIN;
        }
-       if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+       if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
                WL_ERR("Connecting : status (%lu)\n",
-                      cfg_priv->status);
+                      cfg->status);
                return -EAGAIN;
        }
 
@@ -632,7 +783,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
        if (request) {
                /* scan bss */
                ssids = request->ssids;
-               if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
+               if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
                        iscan_req = true;
        } else {
                /* scan in ibss */
@@ -640,10 +791,10 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                ssids = this_ssid;
        }
 
-       cfg_priv->scan_request = request;
-       set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+       cfg->scan_request = request;
+       set_bit(WL_STATUS_SCANNING, &cfg->status);
        if (iscan_req) {
-               err = brcmf_do_iscan(cfg_priv);
+               err = brcmf_do_iscan(cfg);
                if (!err)
                        return err;
                else
@@ -662,7 +813,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                        WL_SCAN("Broadcast scan\n");
                }
 
-               passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
+               passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
                err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
                                &passive_scan, sizeof(passive_scan));
                if (err) {
@@ -687,8 +838,346 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
        return 0;
 
 scan_out:
-       clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
-       cfg_priv->scan_request = NULL;
+       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+       cfg->scan_request = NULL;
+       return err;
+}
+
+static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
+                            struct cfg80211_scan_request *request)
+{
+       u32 n_ssids;
+       u32 n_channels;
+       s32 i;
+       s32 offset;
+       u16 chanspec;
+       u16 channel;
+       struct ieee80211_channel *req_channel;
+       char *ptr;
+       struct brcmf_ssid_le ssid_le;
+
+       memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+       params_le->bss_type = DOT11_BSSTYPE_ANY;
+       params_le->scan_type = 0;
+       params_le->channel_num = 0;
+       params_le->nprobes = cpu_to_le32(-1);
+       params_le->active_time = cpu_to_le32(-1);
+       params_le->passive_time = cpu_to_le32(-1);
+       params_le->home_time = cpu_to_le32(-1);
+       memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+
+       /* if request is null exit so it will be all channel broadcast scan */
+       if (!request)
+               return;
+
+       n_ssids = request->n_ssids;
+       n_channels = request->n_channels;
+       /* Copy channel array if applicable */
+       WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
+       if (n_channels > 0) {
+               for (i = 0; i < n_channels; i++) {
+                       chanspec = 0;
+                       req_channel = request->channels[i];
+                       channel = ieee80211_frequency_to_channel(
+                                       req_channel->center_freq);
+                       if (req_channel->band == IEEE80211_BAND_2GHZ)
+                               chanspec |= WL_CHANSPEC_BAND_2G;
+                       else
+                               chanspec |= WL_CHANSPEC_BAND_5G;
+
+                       if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
+                               chanspec |= WL_CHANSPEC_BW_20;
+                               chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+                       } else {
+                               chanspec |= WL_CHANSPEC_BW_40;
+                               if (req_channel->flags &
+                                               IEEE80211_CHAN_NO_HT40PLUS)
+                                       chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+                               else
+                                       chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+                       }
+
+                       chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
+                       WL_SCAN("Chan : %d, Channel spec: %x\n",
+                               channel, chanspec);
+                       params_le->channel_list[i] = cpu_to_le16(chanspec);
+               }
+       } else {
+               WL_SCAN("Scanning all channels\n");
+       }
+       /* Copy ssid array if applicable */
+       WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids);
+       if (n_ssids > 0) {
+               offset = offsetof(struct brcmf_scan_params_le, channel_list) +
+                               n_channels * sizeof(u16);
+               offset = roundup(offset, sizeof(u32));
+               ptr = (char *)params_le + offset;
+               for (i = 0; i < n_ssids; i++) {
+                       memset(&ssid_le, 0, sizeof(ssid_le));
+                       ssid_le.SSID_len =
+                                       cpu_to_le32(request->ssids[i].ssid_len);
+                       memcpy(ssid_le.SSID, request->ssids[i].ssid,
+                              request->ssids[i].ssid_len);
+                       if (!ssid_le.SSID_len)
+                               WL_SCAN("%d: Broadcast scan\n", i);
+                       else
+                               WL_SCAN("%d: scan for  %s size =%d\n", i,
+                                       ssid_le.SSID, ssid_le.SSID_len);
+                       memcpy(ptr, &ssid_le, sizeof(ssid_le));
+                       ptr += sizeof(ssid_le);
+               }
+       } else {
+               WL_SCAN("Broadcast scan %p\n", request->ssids);
+               if ((request->ssids) && request->ssids->ssid_len) {
+                       WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID,
+                               request->ssids->ssid_len);
+                       params_le->ssid_le.SSID_len =
+                               cpu_to_le32(request->ssids->ssid_len);
+                       memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
+                               request->ssids->ssid_len);
+               }
+       }
+       /* Adding mask to channel numbers */
+       params_le->channel_num =
+               cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+                       (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+}
+
+static s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+                           struct net_device *ndev,
+                           bool aborted, bool fw_abort)
+{
+       struct brcmf_scan_params_le params_le;
+       struct cfg80211_scan_request *scan_request;
+       s32 err = 0;
+
+       WL_SCAN("Enter\n");
+
+       /* clear scan request, because the FW abort can cause a second call */
+       /* to this functon and might cause a double cfg80211_scan_done      */
+       scan_request = cfg->scan_request;
+       cfg->scan_request = NULL;
+
+       if (timer_pending(&cfg->escan_timeout))
+               del_timer_sync(&cfg->escan_timeout);
+
+       if (fw_abort) {
+               /* Do a scan abort to stop the driver's scan engine */
+               WL_SCAN("ABORT scan in firmware\n");
+               memset(&params_le, 0, sizeof(params_le));
+               memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
+               params_le.bss_type = DOT11_BSSTYPE_ANY;
+               params_le.scan_type = 0;
+               params_le.channel_num = cpu_to_le32(1);
+               params_le.nprobes = cpu_to_le32(1);
+               params_le.active_time = cpu_to_le32(-1);
+               params_le.passive_time = cpu_to_le32(-1);
+               params_le.home_time = cpu_to_le32(-1);
+               /* Scan is aborted by setting channel_list[0] to -1 */
+               params_le.channel_list[0] = cpu_to_le16(-1);
+               /* E-Scan (or anyother type) can be aborted by SCAN */
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
+                       sizeof(params_le));
+               if (err)
+                       WL_ERR("Scan abort  failed\n");
+       }
+       /*
+        * e-scan can be initiated by scheduled scan
+        * which takes precedence.
+        */
+       if (cfg->sched_escan) {
+               WL_SCAN("scheduled scan completed\n");
+               cfg->sched_escan = false;
+               if (!aborted)
+                       cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+               brcmf_set_mpc(ndev, 1);
+       } else if (scan_request) {
+               WL_SCAN("ESCAN Completed scan: %s\n",
+                               aborted ? "Aborted" : "Done");
+               cfg80211_scan_done(scan_request, aborted);
+               brcmf_set_mpc(ndev, 1);
+       }
+       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scan complete while device not scanning\n");
+               return -EPERM;
+       }
+
+       return err;
+}
+
+static s32
+brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
+               struct cfg80211_scan_request *request, u16 action)
+{
+       s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
+                         offsetof(struct brcmf_escan_params_le, params_le);
+       struct brcmf_escan_params_le *params;
+       s32 err = 0;
+
+       WL_SCAN("E-SCAN START\n");
+
+       if (request != NULL) {
+               /* Allocate space for populating ssids in struct */
+               params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
+
+               /* Allocate space for populating ssids in struct */
+               params_size += sizeof(struct brcmf_ssid) * request->n_ssids;
+       }
+
+       params = kzalloc(params_size, GFP_KERNEL);
+       if (!params) {
+               err = -ENOMEM;
+               goto exit;
+       }
+       BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
+       brcmf_escan_prep(&params->params_le, request);
+       params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+       params->action = cpu_to_le16(action);
+       params->sync_id = cpu_to_le16(0x1234);
+
+       err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
+                       cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
+       if (err) {
+               if (err == -EBUSY)
+                       WL_INFO("system busy : escan canceled\n");
+               else
+                       WL_ERR("error (%d)\n", err);
+       }
+
+       kfree(params);
+exit:
+       return err;
+}
+
+static s32
+brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
+              struct net_device *ndev, struct cfg80211_scan_request *request)
+{
+       s32 err;
+       __le32 passive_scan;
+       struct brcmf_scan_results *results;
+
+       WL_SCAN("Enter\n");
+       cfg->escan_info.ndev = ndev;
+       cfg->escan_info.wiphy = wiphy;
+       cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+       passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+                       &passive_scan, sizeof(passive_scan));
+       if (err) {
+               WL_ERR("error (%d)\n", err);
+               return err;
+       }
+       brcmf_set_mpc(ndev, 0);
+       results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
+       results->version = 0;
+       results->count = 0;
+       results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
+
+       err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+       if (err)
+               brcmf_set_mpc(ndev, 1);
+       return err;
+}
+
+static s32
+brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
+                    struct cfg80211_scan_request *request,
+                    struct cfg80211_ssid *this_ssid)
+{
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct cfg80211_ssid *ssids;
+       struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
+       __le32 passive_scan;
+       bool escan_req;
+       bool spec_scan;
+       s32 err;
+       u32 SSID_len;
+
+       WL_SCAN("START ESCAN\n");
+
+       if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+               return -EAGAIN;
+       }
+       if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
+               WL_ERR("Scanning being aborted : status (%lu)\n",
+                      cfg->status);
+               return -EAGAIN;
+       }
+       if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
+               WL_ERR("Connecting : status (%lu)\n",
+                      cfg->status);
+               return -EAGAIN;
+       }
+
+       /* Arm scan timeout timer */
+       mod_timer(&cfg->escan_timeout, jiffies +
+                       WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
+       escan_req = false;
+       if (request) {
+               /* scan bss */
+               ssids = request->ssids;
+               escan_req = true;
+       } else {
+               /* scan in ibss */
+               /* we don't do escan in ibss */
+               ssids = this_ssid;
+       }
+
+       cfg->scan_request = request;
+       set_bit(WL_STATUS_SCANNING, &cfg->status);
+       if (escan_req) {
+               err = brcmf_do_escan(cfg, wiphy, ndev, request);
+               if (!err)
+                       return err;
+               else
+                       goto scan_out;
+       } else {
+               WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
+                      ssids->ssid, ssids->ssid_len);
+               memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
+               SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
+               sr->ssid_le.SSID_len = cpu_to_le32(0);
+               spec_scan = false;
+               if (SSID_len) {
+                       memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
+                       sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
+                       spec_scan = true;
+               } else
+                       WL_SCAN("Broadcast scan\n");
+
+               passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+                               &passive_scan, sizeof(passive_scan));
+               if (err) {
+                       WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
+                       goto scan_out;
+               }
+               brcmf_set_mpc(ndev, 0);
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
+                                     sizeof(sr->ssid_le));
+               if (err) {
+                       if (err == -EBUSY)
+                               WL_INFO("BUSY: scan for \"%s\" canceled\n",
+                                       sr->ssid_le.SSID);
+                       else
+                               WL_ERR("WLC_SCAN error (%d)\n", err);
+
+                       brcmf_set_mpc(ndev, 1);
+                       goto scan_out;
+               }
+       }
+
+       return 0;
+
+scan_out:
+       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+       if (timer_pending(&cfg->escan_timeout))
+               del_timer_sync(&cfg->escan_timeout);
+       cfg->scan_request = NULL;
        return err;
 }
 
@@ -697,6 +1186,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
                 struct cfg80211_scan_request *request)
 {
        struct net_device *ndev = request->wdev->netdev;
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
@@ -704,7 +1194,11 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
+       if (cfg->iscan_on)
+               err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
+       else if (cfg->escan_on)
+               err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
+
        if (err)
                WL_ERR("scan error (%d)\n", err);
 
@@ -749,8 +1243,8 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
 
 static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
@@ -758,30 +1252,30 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
                return -EIO;
 
        if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
-           (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) {
-               cfg_priv->conf->rts_threshold = wiphy->rts_threshold;
-               err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold);
+           (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+               cfg->conf->rts_threshold = wiphy->rts_threshold;
+               err = brcmf_set_rts(ndev, cfg->conf->rts_threshold);
                if (!err)
                        goto done;
        }
        if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
-           (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) {
-               cfg_priv->conf->frag_threshold = wiphy->frag_threshold;
-               err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold);
+           (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+               cfg->conf->frag_threshold = wiphy->frag_threshold;
+               err = brcmf_set_frag(ndev, cfg->conf->frag_threshold);
                if (!err)
                        goto done;
        }
        if (changed & WIPHY_PARAM_RETRY_LONG
-           && (cfg_priv->conf->retry_long != wiphy->retry_long)) {
-               cfg_priv->conf->retry_long = wiphy->retry_long;
-               err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true);
+           && (cfg->conf->retry_long != wiphy->retry_long)) {
+               cfg->conf->retry_long = wiphy->retry_long;
+               err = brcmf_set_retry(ndev, cfg->conf->retry_long, true);
                if (!err)
                        goto done;
        }
        if (changed & WIPHY_PARAM_RETRY_SHORT
-           && (cfg_priv->conf->retry_short != wiphy->retry_short)) {
-               cfg_priv->conf->retry_short = wiphy->retry_short;
-               err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false);
+           && (cfg->conf->retry_short != wiphy->retry_short)) {
+               cfg->conf->retry_short = wiphy->retry_short;
+               err = brcmf_set_retry(ndev, cfg->conf->retry_short, false);
                if (!err)
                        goto done;
        }
@@ -791,74 +1285,19 @@ done:
        return err;
 }
 
-static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
+static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
 {
-       switch (item) {
-       case WL_PROF_SEC:
-               return &cfg_priv->profile->sec;
-       case WL_PROF_BSSID:
-               return &cfg_priv->profile->bssid;
-       case WL_PROF_SSID:
-               return &cfg_priv->profile->ssid;
-       }
-       WL_ERR("invalid item (%d)\n", item);
-       return NULL;
+       memset(prof, 0, sizeof(*prof));
 }
 
-static s32
-brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
-                 const struct brcmf_event_msg *e, void *data, s32 item)
+static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
+       size_t *join_params_size)
 {
-       s32 err = 0;
-       struct brcmf_ssid *ssid;
+       u16 chanspec = 0;
 
-       switch (item) {
-       case WL_PROF_SSID:
-               ssid = (struct brcmf_ssid *) data;
-               memset(cfg_priv->profile->ssid.SSID, 0,
-                      sizeof(cfg_priv->profile->ssid.SSID));
-               memcpy(cfg_priv->profile->ssid.SSID,
-                      ssid->SSID, ssid->SSID_len);
-               cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
-               break;
-       case WL_PROF_BSSID:
-               if (data)
-                       memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
-               else
-                       memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
-               break;
-       case WL_PROF_SEC:
-               memcpy(&cfg_priv->profile->sec, data,
-                      sizeof(cfg_priv->profile->sec));
-               break;
-       case WL_PROF_BEACONINT:
-               cfg_priv->profile->beacon_interval = *(u16 *)data;
-               break;
-       case WL_PROF_DTIMPERIOD:
-               cfg_priv->profile->dtim_period = *(u8 *)data;
-               break;
-       default:
-               WL_ERR("unsupported item (%d)\n", item);
-               err = -EOPNOTSUPP;
-               break;
-       }
-
-       return err;
-}
-
-static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
-{
-       memset(prof, 0, sizeof(*prof));
-}
-
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
-       size_t *join_params_size)
-{
-       u16 chanspec = 0;
-
-       if (ch != 0) {
-               if (ch <= CH_MAX_2G_CHANNEL)
-                       chanspec |= WL_CHANSPEC_BAND_2G;
+       if (ch != 0) {
+               if (ch <= CH_MAX_2G_CHANNEL)
+                       chanspec |= WL_CHANSPEC_BAND_2G;
                else
                        chanspec |= WL_CHANSPEC_BAND_5G;
 
@@ -878,20 +1317,20 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
        }
 }
 
-static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
 {
        struct net_device *ndev = NULL;
        s32 err = 0;
 
        WL_TRACE("Enter\n");
 
-       if (cfg_priv->link_up) {
-               ndev = cfg_to_ndev(cfg_priv);
+       if (cfg->link_up) {
+               ndev = cfg_to_ndev(cfg);
                WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
                err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
                if (err)
                        WL_ERR("WLC_DISASSOC failed (%d)\n", err);
-               cfg_priv->link_up = false;
+               cfg->link_up = false;
        }
        WL_TRACE("Exit\n");
 }
@@ -900,13 +1339,13 @@ static s32
 brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                      struct cfg80211_ibss_params *params)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_join_params join_params;
        size_t join_params_size = 0;
        s32 err = 0;
        s32 wsec = 0;
        s32 bcnprd;
-       struct brcmf_ssid ssid;
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
@@ -919,7 +1358,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                return -EOPNOTSUPP;
        }
 
-       set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+       set_bit(WL_STATUS_CONNECTING, &cfg->status);
 
        if (params->bssid)
                WL_CONN("BSSID: %pM\n", params->bssid);
@@ -982,40 +1421,38 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
        memset(&join_params, 0, sizeof(struct brcmf_join_params));
 
        /* SSID */
-       ssid.SSID_len = min_t(u32, params->ssid_len, 32);
-       memcpy(ssid.SSID, params->ssid, ssid.SSID_len);
-       memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len);
-       join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+       profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32);
+       memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len);
+       memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len);
+       join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
        join_params_size = sizeof(join_params.ssid_le);
-       brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
 
        /* BSSID */
        if (params->bssid) {
                memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
                join_params_size = sizeof(join_params.ssid_le) +
                                   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
+               memcpy(profile->bssid, params->bssid, ETH_ALEN);
        } else {
                memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+               memset(profile->bssid, 0, ETH_ALEN);
        }
 
-       brcmf_update_prof(cfg_priv, NULL,
-                         &join_params.params_le.bssid, WL_PROF_BSSID);
-
        /* Channel */
        if (params->channel) {
                u32 target_channel;
 
-               cfg_priv->channel =
+               cfg->channel =
                        ieee80211_frequency_to_channel(
                                params->channel->center_freq);
                if (params->channel_fixed) {
                        /* adding chanspec */
-                       brcmf_ch_to_chanspec(cfg_priv->channel,
+                       brcmf_ch_to_chanspec(cfg->channel,
                                &join_params, &join_params_size);
                }
 
                /* set channel for starter */
-               target_channel = cfg_priv->channel;
+               target_channel = cfg->channel;
                err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
                                          &target_channel);
                if (err) {
@@ -1023,9 +1460,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                        goto done;
                }
        } else
-               cfg_priv->channel = 0;
+               cfg->channel = 0;
 
-       cfg_priv->ibss_starter = false;
+       cfg->ibss_starter = false;
 
 
        err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
@@ -1037,7 +1474,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
 
 done:
        if (err)
-               clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               clear_bit(WL_STATUS_CONNECTING, &cfg->status);
        WL_TRACE("Exit\n");
        return err;
 }
@@ -1045,14 +1482,14 @@ done:
 static s32
 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       brcmf_link_down(cfg_priv);
+       brcmf_link_down(cfg);
 
        WL_TRACE("Exit\n");
 
@@ -1062,7 +1499,8 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 static s32 brcmf_set_wpa_version(struct net_device *ndev,
                                 struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 val = 0;
        s32 err = 0;
@@ -1079,7 +1517,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
                WL_ERR("set wpa_auth failed (%d)\n", err);
                return err;
        }
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->wpa_versions = sme->crypto.wpa_versions;
        return err;
 }
@@ -1087,7 +1525,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
 static s32 brcmf_set_auth_type(struct net_device *ndev,
                               struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 val = 0;
        s32 err = 0;
@@ -1118,7 +1557,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
                WL_ERR("set auth failed (%d)\n", err);
                return err;
        }
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->auth_type = sme->auth_type;
        return err;
 }
@@ -1127,7 +1566,8 @@ static s32
 brcmf_set_set_cipher(struct net_device *ndev,
                     struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 pval = 0;
        s32 gval = 0;
@@ -1183,7 +1623,7 @@ brcmf_set_set_cipher(struct net_device *ndev,
                return err;
        }
 
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
        sec->cipher_group = sme->crypto.cipher_group;
 
@@ -1193,7 +1633,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
 static s32
 brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 val = 0;
        s32 err = 0;
@@ -1239,74 +1680,76 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
                        return err;
                }
        }
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->wpa_auth = sme->crypto.akm_suites[0];
 
        return err;
 }
 
 static s32
-brcmf_set_wep_sharedkey(struct net_device *ndev,
-                    struct cfg80211_connect_params *sme)
+brcmf_set_sharedkey(struct net_device *ndev,
+                   struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        struct brcmf_wsec_key key;
        s32 val;
        s32 err = 0;
+       s32 bssidx;
 
        WL_CONN("key len (%d)\n", sme->key_len);
 
        if (sme->key_len == 0)
                return 0;
 
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
                sec->wpa_versions, sec->cipher_pairwise);
 
        if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
                return 0;
 
-       if (sec->cipher_pairwise &
-           (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) {
-               memset(&key, 0, sizeof(key));
-               key.len = (u32) sme->key_len;
-               key.index = (u32) sme->key_idx;
-               if (key.len > sizeof(key.data)) {
-                       WL_ERR("Too long key length (%u)\n", key.len);
-                       return -EINVAL;
-               }
-               memcpy(key.data, sme->key, key.len);
-               key.flags = BRCMF_PRIMARY_KEY;
-               switch (sec->cipher_pairwise) {
-               case WLAN_CIPHER_SUITE_WEP40:
-                       key.algo = CRYPTO_ALGO_WEP1;
-                       break;
-               case WLAN_CIPHER_SUITE_WEP104:
-                       key.algo = CRYPTO_ALGO_WEP128;
-                       break;
-               default:
-                       WL_ERR("Invalid algorithm (%d)\n",
-                              sme->crypto.ciphers_pairwise[0]);
-                       return -EINVAL;
-               }
-               /* Set the new key/index */
-               WL_CONN("key length (%d) key index (%d) algo (%d)\n",
-                       key.len, key.index, key.algo);
-               WL_CONN("key \"%s\"\n", key.data);
-               err = send_key_to_dongle(ndev, &key);
-               if (err)
-                       return err;
+       if (!(sec->cipher_pairwise &
+           (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)))
+               return 0;
 
-               if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
-                       WL_CONN("set auth_type to shared key\n");
-                       val = 1;        /* shared key */
-                       err = brcmf_dev_intvar_set(ndev, "auth", val);
-                       if (err) {
-                               WL_ERR("set auth failed (%d)\n", err);
-                               return err;
-                       }
-               }
+       memset(&key, 0, sizeof(key));
+       key.len = (u32) sme->key_len;
+       key.index = (u32) sme->key_idx;
+       if (key.len > sizeof(key.data)) {
+               WL_ERR("Too long key length (%u)\n", key.len);
+               return -EINVAL;
+       }
+       memcpy(key.data, sme->key, key.len);
+       key.flags = BRCMF_PRIMARY_KEY;
+       switch (sec->cipher_pairwise) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               key.algo = CRYPTO_ALGO_WEP1;
+               break;
+       case WLAN_CIPHER_SUITE_WEP104:
+               key.algo = CRYPTO_ALGO_WEP128;
+               break;
+       default:
+               WL_ERR("Invalid algorithm (%d)\n",
+                      sme->crypto.ciphers_pairwise[0]);
+               return -EINVAL;
+       }
+       /* Set the new key/index */
+       WL_CONN("key length (%d) key index (%d) algo (%d)\n",
+               key.len, key.index, key.algo);
+       WL_CONN("key \"%s\"\n", key.data);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+       if (err)
+               return err;
+
+       if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+               WL_CONN("set auth_type to shared key\n");
+               val = WL_AUTH_SHARED_KEY;       /* shared key */
+               err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
+               if (err)
+                       WL_ERR("set auth failed (%d)\n", err);
        }
        return err;
 }
@@ -1315,7 +1758,8 @@ static s32
 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                    struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct ieee80211_channel *chan = sme->channel;
        struct brcmf_join_params join_params;
        size_t join_params_size;
@@ -1332,15 +1776,15 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                return -EOPNOTSUPP;
        }
 
-       set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+       set_bit(WL_STATUS_CONNECTING, &cfg->status);
 
        if (chan) {
-               cfg_priv->channel =
+               cfg->channel =
                        ieee80211_frequency_to_channel(chan->center_freq);
                WL_CONN("channel (%d), center_req (%d)\n",
-                               cfg_priv->channel, chan->center_freq);
+                               cfg->channel, chan->center_freq);
        } else
-               cfg_priv->channel = 0;
+               cfg->channel = 0;
 
        WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
 
@@ -1368,20 +1812,20 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
-       err = brcmf_set_wep_sharedkey(ndev, sme);
+       err = brcmf_set_sharedkey(ndev, sme);
        if (err) {
-               WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err);
+               WL_ERR("brcmf_set_sharedkey failed (%d)\n", err);
                goto done;
        }
 
        memset(&join_params, 0, sizeof(join_params));
        join_params_size = sizeof(join_params.ssid_le);
 
-       ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len);
-       memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len);
-       memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len);
-       join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
-       brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
+       profile->ssid.SSID_len = min_t(u32,
+                                      sizeof(ssid.SSID), (u32)sme->ssid_len);
+       memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
+       memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+       join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
 
        memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
 
@@ -1389,7 +1833,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                WL_CONN("ssid \"%s\", len (%d)\n",
                       ssid.SSID, ssid.SSID_len);
 
-       brcmf_ch_to_chanspec(cfg_priv->channel,
+       brcmf_ch_to_chanspec(cfg->channel,
                             &join_params, &join_params_size);
        err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
                           &join_params, join_params_size);
@@ -1398,7 +1842,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
 
 done:
        if (err)
-               clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               clear_bit(WL_STATUS_CONNECTING, &cfg->status);
        WL_TRACE("Exit\n");
        return err;
 }
@@ -1407,7 +1851,8 @@ static s32
 brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
                       u16 reason_code)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_scb_val_le scbval;
        s32 err = 0;
 
@@ -1415,16 +1860,16 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+       clear_bit(WL_STATUS_CONNECTED, &cfg->status);
 
-       memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
+       memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
        scbval.val = cpu_to_le32(reason_code);
        err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
                              sizeof(struct brcmf_scb_val_le));
        if (err)
                WL_ERR("error (%d)\n", err);
 
-       cfg_priv->link_up = false;
+       cfg->link_up = false;
 
        WL_TRACE("Exit\n");
        return err;
@@ -1435,8 +1880,8 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
                            enum nl80211_tx_power_setting type, s32 mbm)
 {
 
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        u16 txpwrmw;
        s32 err = 0;
        s32 disable = 0;
@@ -1472,7 +1917,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
                        (s32) (brcmf_mw_to_qdbm(txpwrmw)));
        if (err)
                WL_ERR("qtxpower error (%d)\n", err);
-       cfg_priv->conf->tx_power = dbm;
+       cfg->conf->tx_power = dbm;
 
 done:
        WL_TRACE("Exit\n");
@@ -1481,8 +1926,8 @@ done:
 
 static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        s32 txpwrdbm;
        u8 result;
        s32 err = 0;
@@ -1509,16 +1954,19 @@ static s32
 brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
                               u8 key_idx, bool unicast, bool multicast)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        u32 index;
        u32 wsec;
        s32 err = 0;
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        WL_CONN("key index (%d)\n", key_idx);
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
        if (err) {
                WL_ERR("WLC_GET_WSEC error (%d)\n", err);
                goto done;
@@ -1541,9 +1989,11 @@ static s32
 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
              u8 key_idx, const u8 *mac_addr, struct key_params *params)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_wsec_key key;
        struct brcmf_wsec_key_le key_le;
        s32 err = 0;
+       s32 bssidx;
 
        memset(&key, 0, sizeof(key));
        key.index = (u32) key_idx;
@@ -1552,12 +2002,13 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
        if (!is_multicast_ether_addr(mac_addr))
                memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
        key.len = (u32) params->key_len;
+       bssidx = brcmf_find_bssidx(cfg, ndev);
        /* check for key index change */
        if (key.len == 0) {
                /* key delete */
-               err = send_key_to_dongle(ndev, &key);
+               err = send_key_to_dongle(cfg, bssidx, ndev, &key);
                if (err)
-                       return err;
+                       WL_ERR("key delete error (%d)\n", err);
        } else {
                if (key.len > sizeof(key.data)) {
                        WL_ERR("Invalid key length (%d)\n", key.len);
@@ -1613,12 +2064,12 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
                convert_key_from_CPU(&key, &key_le);
 
                brcmf_netdev_wait_pend8021x(ndev);
-               err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le,
-                                     sizeof(key_le));
-               if (err) {
-                       WL_ERR("WLC_SET_KEY error (%d)\n", err);
-                       return err;
-               }
+               err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
+                                                    sizeof(key_le),
+                                                    cfg->extra_buf,
+                                                    WL_EXTRA_BUF_MAX, bssidx);
+               if (err)
+                       WL_ERR("wsec_key error (%d)\n", err);
        }
        return err;
 }
@@ -1628,11 +2079,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                    u8 key_idx, bool pairwise, const u8 *mac_addr,
                    struct key_params *params)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_wsec_key key;
        s32 val;
        s32 wsec;
        s32 err = 0;
        u8 keybuf[8];
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        WL_CONN("key index (%d)\n", key_idx);
@@ -1659,25 +2112,33 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
        switch (params->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
                key.algo = CRYPTO_ALGO_WEP1;
+               val = WEP_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
                break;
        case WLAN_CIPHER_SUITE_WEP104:
                key.algo = CRYPTO_ALGO_WEP128;
+               val = WEP_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
                break;
        case WLAN_CIPHER_SUITE_TKIP:
-               memcpy(keybuf, &key.data[24], sizeof(keybuf));
-               memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
-               memcpy(&key.data[16], keybuf, sizeof(keybuf));
+               if (cfg->conf->mode != WL_MODE_AP) {
+                       WL_CONN("Swapping key\n");
+                       memcpy(keybuf, &key.data[24], sizeof(keybuf));
+                       memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+                       memcpy(&key.data[16], keybuf, sizeof(keybuf));
+               }
                key.algo = CRYPTO_ALGO_TKIP;
+               val = TKIP_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                key.algo = CRYPTO_ALGO_AES_CCM;
+               val = AES_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key.algo = CRYPTO_ALGO_AES_CCM;
+               val = AES_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
                break;
        default:
@@ -1686,28 +2147,23 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
-       err = send_key_to_dongle(ndev, &key); /* Set the new key/index */
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = send_key_to_dongle(cfg, bssidx, ndev, &key);
        if (err)
                goto done;
 
-       val = WEP_ENABLED;
-       err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
+       err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
        if (err) {
                WL_ERR("get wsec error (%d)\n", err);
                goto done;
        }
-       wsec &= ~(WEP_ENABLED);
        wsec |= val;
-       err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
        if (err) {
                WL_ERR("set wsec error (%d)\n", err);
                goto done;
        }
 
-       val = 1;                /* assume shared key. otherwise 0 */
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
-       if (err)
-               WL_ERR("WLC_SET_AUTH error (%d)\n", err);
 done:
        WL_TRACE("Exit\n");
        return err;
@@ -1717,10 +2173,10 @@ static s32
 brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_wsec_key key;
        s32 err = 0;
-       s32 val;
-       s32 wsec;
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
@@ -1735,7 +2191,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
        WL_CONN("key index (%d)\n", key_idx);
 
        /* Set the new key/index */
-       err = send_key_to_dongle(ndev, &key);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = send_key_to_dongle(cfg, bssidx, ndev, &key);
        if (err) {
                if (err == -EINVAL) {
                        if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -1744,35 +2201,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                }
                /* Ignore this error, may happen during DISASSOC */
                err = -EAGAIN;
-               goto done;
-       }
-
-       val = 0;
-       err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
-       if (err) {
-               WL_ERR("get wsec error (%d)\n", err);
-               /* Ignore this error, may happen during DISASSOC */
-               err = -EAGAIN;
-               goto done;
-       }
-       wsec &= ~(WEP_ENABLED);
-       wsec |= val;
-       err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
-       if (err) {
-               WL_ERR("set wsec error (%d)\n", err);
-               /* Ignore this error, may happen during DISASSOC */
-               err = -EAGAIN;
-               goto done;
        }
 
-       val = 0;                /* assume open key. otherwise 1 */
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
-       if (err) {
-               WL_ERR("WLC_SET_AUTH error (%d)\n", err);
-               /* Ignore this error, may happen during DISASSOC */
-               err = -EAGAIN;
-       }
-done:
        WL_TRACE("Exit\n");
        return err;
 }
@@ -1783,10 +2213,12 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
                    void (*callback) (void *cookie, struct key_params * params))
 {
        struct key_params params;
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 wsec;
        s32 err = 0;
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        WL_CONN("key index (%d)\n", key_idx);
@@ -1795,16 +2227,17 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
 
        memset(&params, 0, sizeof(params));
 
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
        if (err) {
                WL_ERR("WLC_GET_WSEC error (%d)\n", err);
                /* Ignore this error, may happen during DISASSOC */
                err = -EAGAIN;
                goto done;
        }
-       switch (wsec) {
+       switch (wsec & ~SES_OW_ENABLED) {
        case WEP_ENABLED:
-               sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+               sec = &profile->sec;
                if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
                        params.cipher = WLAN_CIPHER_SUITE_WEP40;
                        WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
@@ -1844,53 +2277,73 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
 
 static s32
 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
-                       u8 *mac, struct station_info *sinfo)
+                          u8 *mac, struct station_info *sinfo)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_scb_val_le scb_val;
        int rssi;
        s32 rate;
        s32 err = 0;
-       u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
+       u8 *bssid = profile->bssid;
+       struct brcmf_sta_info_le *sta_info_le;
 
-       WL_TRACE("Enter\n");
+       WL_TRACE("Enter, MAC %pM\n", mac);
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       if (memcmp(mac, bssid, ETH_ALEN)) {
-               WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X"
-                       "wl_bssid-%X:%X:%X:%X:%X:%X\n",
-                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
-                       bssid[0], bssid[1], bssid[2], bssid[3],
-                       bssid[4], bssid[5]);
-               err = -ENOENT;
-               goto done;
-       }
-
-       /* Report the current tx rate */
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
-       if (err) {
-               WL_ERR("Could not get rate (%d)\n", err);
-       } else {
-               sinfo->filled |= STATION_INFO_TX_BITRATE;
-               sinfo->txrate.legacy = rate * 5;
-               WL_CONN("Rate %d Mbps\n", rate / 2);
-       }
+       if (cfg->conf->mode == WL_MODE_AP) {
+               err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
+                                            cfg->dcmd_buf,
+                                            WL_DCMD_LEN_MAX);
+               if (err < 0) {
+                       WL_ERR("GET STA INFO failed, %d\n", err);
+                       goto done;
+               }
+               sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
 
-       if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
-               memset(&scb_val, 0, sizeof(scb_val));
-               err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
-                                     sizeof(struct brcmf_scb_val_le));
+               sinfo->filled = STATION_INFO_INACTIVE_TIME;
+               sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
+               if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
+                       sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+                       sinfo->connected_time = le32_to_cpu(sta_info_le->in);
+               }
+               WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
+                        sinfo->inactive_time, sinfo->connected_time);
+       } else if (cfg->conf->mode == WL_MODE_BSS) {
+               if (memcmp(mac, bssid, ETH_ALEN)) {
+                       WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
+                              mac, bssid);
+                       err = -ENOENT;
+                       goto done;
+               }
+               /* Report the current tx rate */
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
                if (err) {
-                       WL_ERR("Could not get rssi (%d)\n", err);
+                       WL_ERR("Could not get rate (%d)\n", err);
+                       goto done;
                } else {
-                       rssi = le32_to_cpu(scb_val.val);
-                       sinfo->filled |= STATION_INFO_SIGNAL;
-                       sinfo->signal = rssi;
-                       WL_CONN("RSSI %d dBm\n", rssi);
+                       sinfo->filled |= STATION_INFO_TX_BITRATE;
+                       sinfo->txrate.legacy = rate * 5;
+                       WL_CONN("Rate %d Mbps\n", rate / 2);
                }
-       }
 
+               if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
+                       memset(&scb_val, 0, sizeof(scb_val));
+                       err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
+                                             sizeof(scb_val));
+                       if (err) {
+                               WL_ERR("Could not get rssi (%d)\n", err);
+                               goto done;
+                       } else {
+                               rssi = le32_to_cpu(scb_val.val);
+                               sinfo->filled |= STATION_INFO_SIGNAL;
+                               sinfo->signal = rssi;
+                               WL_CONN("RSSI %d dBm\n", rssi);
+                       }
+               }
+       } else
+               err = -EPERM;
 done:
        WL_TRACE("Exit\n");
        return err;
@@ -1902,7 +2355,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
 {
        s32 pm;
        s32 err = 0;
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 
        WL_TRACE("Enter\n");
 
@@ -1910,14 +2363,13 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
         * Powersave enable/disable request is coming from the
         * cfg80211 even before the interface is up. In that
         * scenario, driver will be storing the power save
-        * preference in cfg_priv struct to apply this to
+        * preference in cfg struct to apply this to
         * FW later while initializing the dongle
         */
-       cfg_priv->pwr_save = enabled;
-       if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       cfg->pwr_save = enabled;
+       if (!test_bit(WL_STATUS_READY, &cfg->status)) {
 
-               WL_INFO("Device is not ready,"
-                       "storing the value in cfg_priv struct\n");
+               WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
                goto done;
        }
 
@@ -1995,10 +2447,10 @@ done:
        return err;
 }
 
-static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
                                   struct brcmf_bss_info_le *bi)
 {
-       struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
        struct ieee80211_channel *notify_channel;
        struct cfg80211_bss *bss;
        struct ieee80211_supported_band *band;
@@ -2062,14 +2514,14 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
                                            le32_to_cpu(bss->length));
 }
 
-static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_scan_results *bss_list;
        struct brcmf_bss_info_le *bi = NULL;    /* must be initialized */
        s32 err = 0;
        int i;
 
-       bss_list = cfg_priv->bss_list;
+       bss_list = cfg->bss_list;
        if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
                WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
                       bss_list->version);
@@ -2078,17 +2530,17 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
        WL_SCAN("scanned AP count (%d)\n", bss_list->count);
        for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
                bi = next_bss_le(bss_list, bi);
-               err = brcmf_inform_single_bss(cfg_priv, bi);
+               err = brcmf_inform_single_bss(cfg, bi);
                if (err)
                        break;
        }
        return err;
 }
 
-static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
+static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
                          struct net_device *ndev, const u8 *bssid)
 {
-       struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
        struct ieee80211_channel *notify_channel;
        struct brcmf_bss_info_le *bi = NULL;
        struct ieee80211_supported_band *band;
@@ -2163,9 +2615,9 @@ CleanUp:
        return err;
 }
 
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv)
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg)
 {
-       return cfg_priv->conf->mode == WL_MODE_IBSS;
+       return cfg->conf->mode == WL_MODE_IBSS;
 }
 
 /*
@@ -2182,22 +2634,62 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
        totlen = buflen;
 
        /* find tagged parameter */
-       while (totlen >= 2) {
+       while (totlen >= TLV_HDR_LEN) {
                int len = elt->len;
 
                /* validate remaining totlen */
-               if ((elt->id == key) && (totlen >= (len + 2)))
+               if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
                        return elt;
 
-               elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2));
-               totlen -= (len + 2);
+               elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
+               totlen -= (len + TLV_HDR_LEN);
+       }
+
+       return NULL;
+}
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+                u8 *oui, u32 oui_len, u8 type)
+{
+       /* If the contents match the OUI and the type */
+       if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+           !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+           type == ie[TLV_BODY_OFF + oui_len]) {
+               return true;
        }
 
+       if (tlvs == NULL)
+               return false;
+       /* point to the next ie */
+       ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+       /* calculate the length of the rest of the buffer */
+       *tlvs_len -= (int)(ie - *tlvs);
+       /* update the pointer to the start of the buffer */
+       *tlvs = ie;
+
+       return false;
+}
+
+struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+       struct brcmf_tlv *ie;
+
+       while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
+               if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+                                    WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+                       return (struct brcmf_vs_tlv *)ie;
+       }
        return NULL;
 }
 
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
 {
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_bss_info_le *bi;
        struct brcmf_ssid *ssid;
        struct brcmf_tlv *tim;
@@ -2208,21 +2700,21 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
        s32 err = 0;
 
        WL_TRACE("Enter\n");
-       if (brcmf_is_ibssmode(cfg_priv))
+       if (brcmf_is_ibssmode(cfg))
                return err;
 
-       ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID);
+       ssid = &profile->ssid;
 
-       *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
-       err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO,
-                       cfg_priv->extra_buf, WL_EXTRA_BUF_MAX);
+       *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
+       err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
+                       cfg->extra_buf, WL_EXTRA_BUF_MAX);
        if (err) {
                WL_ERR("Could not get bss info %d\n", err);
                goto update_bss_info_out;
        }
 
-       bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
-       err = brcmf_inform_single_bss(cfg_priv, bi);
+       bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
+       err = brcmf_inform_single_bss(cfg, bi);
        if (err)
                goto update_bss_info_out;
 
@@ -2240,7 +2732,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
                * so we speficially query dtim information to dongle.
                */
                u32 var;
-               err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv),
+               err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
                                           "dtim_assoc", &var);
                if (err) {
                        WL_ERR("wl dtim_assoc failed (%d)\n", err);
@@ -2249,20 +2741,22 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
                dtim_period = (u8)var;
        }
 
-       brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT);
-       brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+       profile->beacon_interval = beacon_interval;
+       profile->dtim_period = dtim_period;
 
 update_bss_info_out:
        WL_TRACE("Exit");
        return err;
 }
 
-static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
+       struct escan_info *escan = &cfg->escan_info;
        struct brcmf_ssid ssid;
 
-       if (cfg_priv->iscan_on) {
+       set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
+       if (cfg->iscan_on) {
                iscan->state = WL_ISCAN_STATE_IDLE;
 
                if (iscan->timer_on) {
@@ -2275,27 +2769,40 @@ static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
                /* Abort iscan running in FW */
                memset(&ssid, 0, sizeof(ssid));
                brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
+
+               if (cfg->scan_request) {
+                       /* Indidate scan abort to cfg80211 layer */
+                       WL_INFO("Terminating scan in progress\n");
+                       cfg80211_scan_done(cfg->scan_request, true);
+                       cfg->scan_request = NULL;
+               }
        }
+       if (cfg->escan_on && cfg->scan_request) {
+               escan->escan_state = WL_ESCAN_STATE_IDLE;
+               brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
+       }
+       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+       clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
 }
 
 static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
                                        bool aborted)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
+       struct net_device *ndev = cfg_to_ndev(cfg);
 
-       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
                WL_ERR("Scan complete while device not scanning\n");
                return;
        }
-       if (cfg_priv->scan_request) {
+       if (cfg->scan_request) {
                WL_SCAN("ISCAN Completed scan: %s\n",
                                aborted ? "Aborted" : "Done");
-               cfg80211_scan_done(cfg_priv->scan_request, aborted);
+               cfg80211_scan_done(cfg->scan_request, aborted);
                brcmf_set_mpc(ndev, 1);
-               cfg_priv->scan_request = NULL;
+               cfg->scan_request = NULL;
        }
-       cfg_priv->iscan_kickstart = false;
+       cfg->iscan_kickstart = false;
 }
 
 static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
@@ -2348,21 +2855,21 @@ brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
        return err;
 }
 
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
        iscan->state = WL_ISCAN_STATE_IDLE;
-       brcmf_inform_bss(cfg_priv);
+       brcmf_inform_bss(cfg);
        brcmf_notify_iscan_complete(iscan, false);
 
        return err;
 }
 
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
        /* Reschedule the timer */
@@ -2372,12 +2879,12 @@ static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
        return err;
 }
 
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
-       brcmf_inform_bss(cfg_priv);
+       brcmf_inform_bss(cfg);
        brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
        /* Reschedule the timer */
        mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -2386,9 +2893,9 @@ static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
        return err;
 }
 
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
        iscan->state = WL_ISCAN_STATE_IDLE;
@@ -2402,7 +2909,7 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
        struct brcmf_cfg80211_iscan_ctrl *iscan =
                        container_of(work, struct brcmf_cfg80211_iscan_ctrl,
                                     work);
-       struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+       struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
        struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
        u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
 
@@ -2411,12 +2918,12 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
                iscan->timer_on = 0;
        }
 
-       if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) {
+       if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
                status = BRCMF_SCAN_RESULTS_ABORTED;
                WL_ERR("Abort iscan\n");
        }
 
-       el->handler[status](cfg_priv);
+       el->handler[status](cfg);
 }
 
 static void brcmf_iscan_timer(unsigned long data)
@@ -2431,11 +2938,11 @@ static void brcmf_iscan_timer(unsigned long data)
        }
 }
 
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
 
-       if (cfg_priv->iscan_on) {
+       if (cfg->iscan_on) {
                iscan->state = WL_ISCAN_STATE_IDLE;
                INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
        }
@@ -2453,26 +2960,192 @@ static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
        el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
 }
 
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
        int err = 0;
 
-       if (cfg_priv->iscan_on) {
-               iscan->ndev = cfg_to_ndev(cfg_priv);
+       if (cfg->iscan_on) {
+               iscan->ndev = cfg_to_ndev(cfg);
                brcmf_init_iscan_eloop(&iscan->el);
                iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
                init_timer(&iscan->timer);
                iscan->timer.data = (unsigned long) iscan;
                iscan->timer.function = brcmf_iscan_timer;
-               err = brcmf_invoke_iscan(cfg_priv);
+               err = brcmf_invoke_iscan(cfg);
                if (!err)
-                       iscan->data = cfg_priv;
+                       iscan->data = cfg;
+       }
+
+       return err;
+}
+
+static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
+{
+       struct brcmf_cfg80211_info *cfg =
+                       container_of(work, struct brcmf_cfg80211_info,
+                                    escan_timeout_work);
+
+       brcmf_notify_escan_complete(cfg,
+               cfg->escan_info.ndev, true, true);
+}
+
+static void brcmf_escan_timeout(unsigned long data)
+{
+       struct brcmf_cfg80211_info *cfg =
+                       (struct brcmf_cfg80211_info *)data;
+
+       if (cfg->scan_request) {
+               WL_ERR("timer expired\n");
+               if (cfg->escan_on)
+                       schedule_work(&cfg->escan_timeout_work);
+       }
+}
+
+static s32
+brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
+                             struct brcmf_bss_info_le *bss_info_le)
+{
+       if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
+               (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) ==
+               CHSPEC_BAND(le16_to_cpu(bss->chanspec))) &&
+               bss_info_le->SSID_len == bss->SSID_len &&
+               !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
+               if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+                       (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+                       s16 bss_rssi = le16_to_cpu(bss->RSSI);
+                       s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
+
+                       /* preserve max RSSI if the measurements are
+                       * both on-channel or both off-channel
+                       */
+                       if (bss_info_rssi > bss_rssi)
+                               bss->RSSI = bss_info_le->RSSI;
+               } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+                       (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+                       /* preserve the on-channel rssi measurement
+                       * if the new measurement is off channel
+                       */
+                       bss->RSSI = bss_info_le->RSSI;
+                       bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+               }
+               return 1;
        }
+       return 0;
+}
+
+static s32
+brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
+                            struct net_device *ndev,
+                            const struct brcmf_event_msg *e, void *data)
+{
+       s32 status;
+       s32 err = 0;
+       struct brcmf_escan_result_le *escan_result_le;
+       struct brcmf_bss_info_le *bss_info_le;
+       struct brcmf_bss_info_le *bss = NULL;
+       u32 bi_length;
+       struct brcmf_scan_results *list;
+       u32 i;
+       bool aborted;
+
+       status = be32_to_cpu(e->status);
+
+       if (!ndev || !cfg->escan_on ||
+                       !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
+                       ndev, cfg->escan_on,
+                       !test_bit(WL_STATUS_SCANNING, &cfg->status));
+               return -EPERM;
+       }
+
+       if (status == BRCMF_E_STATUS_PARTIAL) {
+               WL_SCAN("ESCAN Partial result\n");
+               escan_result_le = (struct brcmf_escan_result_le *) data;
+               if (!escan_result_le) {
+                       WL_ERR("Invalid escan result (NULL pointer)\n");
+                       goto exit;
+               }
+               if (!cfg->scan_request) {
+                       WL_SCAN("result without cfg80211 request\n");
+                       goto exit;
+               }
+
+               if (le16_to_cpu(escan_result_le->bss_count) != 1) {
+                       WL_ERR("Invalid bss_count %d: ignoring\n",
+                               escan_result_le->bss_count);
+                       goto exit;
+               }
+               bss_info_le = &escan_result_le->bss_info_le;
+
+               bi_length = le32_to_cpu(bss_info_le->length);
+               if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
+                                       WL_ESCAN_RESULTS_FIXED_SIZE)) {
+                       WL_ERR("Invalid bss_info length %d: ignoring\n",
+                               bi_length);
+                       goto exit;
+               }
+
+               if (!(cfg_to_wiphy(cfg)->interface_modes &
+                                       BIT(NL80211_IFTYPE_ADHOC))) {
+                       if (le16_to_cpu(bss_info_le->capability) &
+                                               WLAN_CAPABILITY_IBSS) {
+                               WL_ERR("Ignoring IBSS result\n");
+                               goto exit;
+                       }
+               }
 
+               list = (struct brcmf_scan_results *)
+                               cfg->escan_info.escan_buf;
+               if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
+                       WL_ERR("Buffer is too small: ignoring\n");
+                       goto exit;
+               }
+
+               for (i = 0; i < list->count; i++) {
+                       bss = bss ? (struct brcmf_bss_info_le *)
+                               ((unsigned char *)bss +
+                               le32_to_cpu(bss->length)) : list->bss_info_le;
+                       if (brcmf_compare_update_same_bss(bss, bss_info_le))
+                               goto exit;
+               }
+               memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
+                       bss_info_le, bi_length);
+               list->version = le32_to_cpu(bss_info_le->version);
+               list->buflen += bi_length;
+               list->count++;
+       } else {
+               cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+               if (cfg->scan_request) {
+                       cfg->bss_list = (struct brcmf_scan_results *)
+                               cfg->escan_info.escan_buf;
+                       brcmf_inform_bss(cfg);
+                       aborted = status != BRCMF_E_STATUS_SUCCESS;
+                       brcmf_notify_escan_complete(cfg, ndev, aborted,
+                                                   false);
+               } else
+                       WL_ERR("Unexpected scan result 0x%x\n", status);
+       }
+exit:
        return err;
 }
 
+static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
+{
+
+       if (cfg->escan_on) {
+               cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
+                       brcmf_cfg80211_escan_handler;
+               cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+               /* Init scan_timeout timer */
+               init_timer(&cfg->escan_timeout);
+               cfg->escan_timeout.data = (unsigned long) cfg;
+               cfg->escan_timeout.function = brcmf_escan_timeout;
+               INIT_WORK(&cfg->escan_timeout_work,
+                       brcmf_cfg80211_escan_timeout_worker);
+       }
+}
+
 static __always_inline void brcmf_delay(u32 ms)
 {
        if (ms < 1000 / HZ) {
@@ -2481,249 +3154,1197 @@ static __always_inline void brcmf_delay(u32 ms)
        } else {
                msleep(ms);
        }
-}
+}
+
+static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+
+       /*
+        * Check for WL_STATUS_READY before any function call which
+        * could result is bus access. Don't block the resume for
+        * any driver error conditions
+        */
+       WL_TRACE("Enter\n");
+
+       if (test_bit(WL_STATUS_READY, &cfg->status))
+               brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
+
+       WL_TRACE("Exit\n");
+       return 0;
+}
+
+static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
+                                 struct cfg80211_wowlan *wow)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
+
+       WL_TRACE("Enter\n");
+
+       /*
+        * Check for WL_STATUS_READY before any function call which
+        * could result is bus access. Don't block the suspend for
+        * any driver error conditions
+        */
+
+       /*
+        * While going to suspend if associated with AP disassociate
+        * from AP to save power while system is in suspended state
+        */
+       if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
+            test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
+            test_bit(WL_STATUS_READY, &cfg->status)) {
+               WL_INFO("Disassociating from AP"
+                       " while entering suspend state\n");
+               brcmf_link_down(cfg);
+
+               /*
+                * Make sure WPA_Supplicant receives all the event
+                * generated due to DISASSOC call to the fw to keep
+                * the state fw and WPA_Supplicant state consistent
+                */
+               brcmf_delay(500);
+       }
+
+       if (test_bit(WL_STATUS_READY, &cfg->status))
+               brcmf_abort_scanning(cfg);
+       else
+               clear_bit(WL_STATUS_SCANNING, &cfg->status);
+
+       /* Turn off watchdog timer */
+       if (test_bit(WL_STATUS_READY, &cfg->status))
+               brcmf_set_mpc(ndev, 1);
+
+       WL_TRACE("Exit\n");
+
+       return 0;
+}
+
+static __used s32
+brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
+{
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       u32 buflen;
+
+       buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
+                              WL_DCMD_LEN_MAX);
+       BUG_ON(!buflen);
+
+       return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
+                              buflen);
+}
+
+static s32
+brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
+                 s32 buf_len)
+{
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       u32 len;
+       s32 err = 0;
+
+       len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
+                           WL_DCMD_LEN_MAX);
+       BUG_ON(!len);
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
+                             WL_DCMD_LEN_MAX);
+       if (err) {
+               WL_ERR("error (%d)\n", err);
+               return err;
+       }
+       memcpy(buf, cfg->dcmd_buf, buf_len);
+
+       return err;
+}
+
+static __used s32
+brcmf_update_pmklist(struct net_device *ndev,
+                    struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
+{
+       int i, j;
+       int pmkid_len;
+
+       pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
+
+       WL_CONN("No of elements %d\n", pmkid_len);
+       for (i = 0; i < pmkid_len; i++) {
+               WL_CONN("PMKID[%d]: %pM =\n", i,
+                       &pmk_list->pmkids.pmkid[i].BSSID);
+               for (j = 0; j < WLAN_PMKID_LEN; j++)
+                       WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
+       }
+
+       if (!err)
+               brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
+                                       sizeof(*pmk_list));
+
+       return err;
+}
+
+static s32
+brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+                        struct cfg80211_pmksa *pmksa)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
+       s32 err = 0;
+       int i;
+       int pmkid_len;
+
+       WL_TRACE("Enter\n");
+       if (!check_sys_up(wiphy))
+               return -EIO;
+
+       pmkid_len = le32_to_cpu(pmkids->npmkid);
+       for (i = 0; i < pmkid_len; i++)
+               if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
+                       break;
+       if (i < WL_NUM_PMKIDS_MAX) {
+               memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
+               memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+               if (i == pmkid_len) {
+                       pmkid_len++;
+                       pmkids->npmkid = cpu_to_le32(pmkid_len);
+               }
+       } else
+               err = -EINVAL;
+
+       WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+               pmkids->pmkid[pmkid_len].BSSID);
+       for (i = 0; i < WLAN_PMKID_LEN; i++)
+               WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+
+       err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+       WL_TRACE("Exit\n");
+       return err;
+}
+
+static s32
+brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+                     struct cfg80211_pmksa *pmksa)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct pmkid_list pmkid;
+       s32 err = 0;
+       int i, pmkid_len;
+
+       WL_TRACE("Enter\n");
+       if (!check_sys_up(wiphy))
+               return -EIO;
+
+       memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
+       memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+
+       WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+              &pmkid.pmkid[0].BSSID);
+       for (i = 0; i < WLAN_PMKID_LEN; i++)
+               WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
+
+       pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
+       for (i = 0; i < pmkid_len; i++)
+               if (!memcmp
+                   (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+                    ETH_ALEN))
+                       break;
+
+       if ((pmkid_len > 0)
+           && (i < pmkid_len)) {
+               memset(&cfg->pmk_list->pmkids.pmkid[i], 0,
+                      sizeof(struct pmkid));
+               for (; i < (pmkid_len - 1); i++) {
+                       memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+                              &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+                              ETH_ALEN);
+                       memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+                              &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+                              WLAN_PMKID_LEN);
+               }
+               cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
+       } else
+               err = -EINVAL;
+
+       err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+       WL_TRACE("Exit\n");
+       return err;
+
+}
+
+static s32
+brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       s32 err = 0;
+
+       WL_TRACE("Enter\n");
+       if (!check_sys_up(wiphy))
+               return -EIO;
+
+       memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+       err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+       WL_TRACE("Exit\n");
+       return err;
+
+}
+
+/*
+ * brcmf_notify_sched_scan_results() - handle a PNO (scheduled scan) event.
+ *
+ * A PFN result does not carry all the information required by the
+ * supplicant (e.g. the IEs), so a firmware escan is issued so the
+ * scheduled scan results are reported via wl_inform_single_bss in the
+ * required format. The escan needs a struct cfg80211_scan_request; for
+ * the time being one is built here from the received PNO event.
+ */
+static s32
+brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
+                               struct net_device *ndev,
+                               const struct brcmf_event_msg *e, void *data)
+{
+       struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
+       struct cfg80211_scan_request *request = NULL;
+       struct cfg80211_ssid *ssid = NULL;
+       struct ieee80211_channel *channel = NULL;
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
+       int err = 0;
+       int channel_req = 0;
+       int band = 0;
+       struct brcmf_pno_scanresults_le *pfn_result;
+       u32 result_count;
+       u32 status;
+
+       WL_SCAN("Enter\n");
+
+       /* Nothing to report when a tracked network is lost. */
+       if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
+               WL_SCAN("PFN NET LOST event. Do Nothing\n");
+               return 0;
+       }
+
+       pfn_result = (struct brcmf_pno_scanresults_le *)data;
+       result_count = le32_to_cpu(pfn_result->count);
+       status = le32_to_cpu(pfn_result->status);
+
+       /*
+        * PFN event is limited to fit 512 bytes so we may get
+        * multiple NET_FOUND events. For now place a warning here.
+        */
+       WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
+       WL_SCAN("PFN NET FOUND event. count: %d\n", result_count);
+       if (result_count > 0) {
+               int i;
+
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
+               channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
+               if (!request || !ssid || !channel) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+
+               request->wiphy = wiphy;
+               data += sizeof(struct brcmf_pno_scanresults_le);
+               netinfo_start = (struct brcmf_pno_net_info_le *)data;
+
+               for (i = 0; i < result_count; i++) {
+                       netinfo = &netinfo_start[i];
+                       /*
+                        * NOTE(review): &netinfo_start[i] can never be NULL,
+                        * so this check is dead code.
+                        */
+                       if (!netinfo) {
+                               WL_ERR("Invalid netinfo ptr. index: %d\n", i);
+                               err = -EINVAL;
+                               goto out_err;
+                       }
+
+                       WL_SCAN("SSID:%s Channel:%d\n",
+                       netinfo->SSID, netinfo->channel);
+                       memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
+                       ssid[i].ssid_len = netinfo->SSID_len;
+                       request->n_ssids++;
+
+                       channel_req = netinfo->channel;
+                       if (channel_req <= CH_MAX_2G_CHANNEL)
+                               band = NL80211_BAND_2GHZ;
+                       else
+                               band = NL80211_BAND_5GHZ;
+                       channel[i].center_freq =
+                               ieee80211_channel_to_frequency(channel_req,
+                                                              band);
+                       channel[i].band = band;
+                       channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+                       /*
+                        * NOTE(review): struct cfg80211_scan_request ends in
+                        * a flexible channels[] array, and
+                        * kzalloc(sizeof(*request)) reserves no slots for it.
+                        * This store looks like an out-of-bounds write --
+                        * confirm against cfg80211.h.
+                        */
+                       request->channels[i] = &channel[i];
+                       request->n_channels++;
+               }
+
+               /* assign parsed ssid array */
+               if (request->n_ssids)
+                       request->ssids = &ssid[0];
+
+               if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+                       /* Abort any on-going scan */
+                       brcmf_abort_scanning(cfg);
+               }
+
+               set_bit(WL_STATUS_SCANNING, &cfg->status);
+               err = brcmf_do_escan(cfg, wiphy, ndev, request);
+               if (err) {
+                       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+                       goto out_err;
+               }
+               cfg->sched_escan = true;
+               cfg->scan_request = request;
+       } else {
+               WL_ERR("FALSE PNO Event. (pfn_count == 0)\n");
+               goto out_err;
+       }
+
+       /*
+        * NOTE(review): cfg->scan_request was just set to 'request' and
+        * request->ssids points into 'ssid', yet both are freed below on the
+        * success path -- check for a dangling pointer / use-after-free in
+        * the later scan-completion handling.
+        */
+       kfree(ssid);
+       kfree(channel);
+       kfree(request);
+       return 0;
+
+out_err:
+       /*
+        * NOTE(review): this also reports "sched scan stopped" to cfg80211
+        * on a plain -ENOMEM -- verify that is the intended behavior.
+        */
+       kfree(ssid);
+       kfree(channel);
+       kfree(request);
+       cfg80211_sched_scan_stopped(wiphy);
+       return err;
+}
+
+#ifndef CONFIG_BRCMISCAN
+/*
+ * brcmf_dev_pno_clean() - disable PFN in firmware and clear any
+ * previously programmed PFN entries. Returns 0 on success or the
+ * negative firmware iovar error (also logged).
+ */
+static int brcmf_dev_pno_clean(struct net_device *ndev)
+{
+       char iovbuf[128];
+       int ret;
+
+       /* Disable pfn */
+       ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
+       if (ret == 0) {
+               /* clear pfn entries only after disabling succeeded */
+               ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
+                                            iovbuf, sizeof(iovbuf));
+       }
+       if (ret < 0)
+               WL_ERR("failed code %d\n", ret);
+
+       return ret;
+}
+
+/*
+ * brcmf_dev_pno_config() - program the base PFN parameters (version,
+ * adaptive-scan flag, repeat/backoff exponent and scan frequency) into
+ * firmware via the "pfn_set" iovar. Returns the iovar result.
+ */
+static int brcmf_dev_pno_config(struct net_device *ndev)
+{
+       struct brcmf_pno_param_le pfn_param;
+       char iovbuf[128];
+
+       memset(&pfn_param, 0, sizeof(pfn_param));
+       pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
+
+       /* set extra pno params */
+       pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
+       pfn_param.repeat = BRCMF_PNO_REPEAT;
+       pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
+
+       /* set up pno scan interval */
+       pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
+
+       return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
+                                     &pfn_param, sizeof(pfn_param),
+                                     iovbuf, sizeof(iovbuf));
+}
+
+/*
+ * brcmf_cfg80211_sched_scan_start() - cfg80211 callback that starts a
+ * scheduled (PNO) scan. Clears previous PFN state, writes the base PNO
+ * configuration, adds one hidden-capable PFN filter per non-broadcast
+ * match set, then enables PFN in firmware.
+ */
+static int
+brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               struct cfg80211_sched_scan_request *request)
+{
+       char iovbuf[128];
+       struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+       struct brcmf_pno_net_param_le pfn;
+       int i;
+       int ret = 0;
+
+       /*
+        * NOTE(review): 'request' is dereferenced here but is only checked
+        * for NULL further below -- the check comes too late if a NULL
+        * request is actually possible.
+        */
+       WL_SCAN("Enter n_match_sets:%d   n_ssids:%d\n",
+               request->n_match_sets, request->n_ssids);
+       if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+               return -EAGAIN;
+       }
+
+       if (!request || !request->n_ssids || !request->n_match_sets) {
+               WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
+                      request->n_ssids);
+               return -EINVAL;
+       }
+
+       if (request->n_ssids > 0) {
+               for (i = 0; i < request->n_ssids; i++) {
+                       /* Active scan req for ssids */
+                       WL_SCAN(">>> Active scan req for ssid (%s)\n",
+                               request->ssids[i].ssid);
+
+                       /*
+                        * match_set ssids is a superset of the n_ssid list,
+                        * so we need not add these sets separately.
+                        */
+               }
+       }
+
+       if (request->n_match_sets > 0) {
+               /* clean up everything */
+               ret = brcmf_dev_pno_clean(ndev);
+               if  (ret < 0) {
+                       WL_ERR("failed error=%d\n", ret);
+                       return ret;
+               }
+
+               /* configure pno */
+               ret = brcmf_dev_pno_config(ndev);
+               if (ret < 0) {
+                       WL_ERR("PNO setup failed!! ret=%d\n", ret);
+                       return -EINVAL;
+               }
+
+               /* configure each match set */
+               for (i = 0; i < request->n_match_sets; i++) {
+                       struct cfg80211_ssid *ssid;
+                       u32 ssid_len;
+
+                       ssid = &request->match_sets[i].ssid;
+                       ssid_len = ssid->ssid_len;
+
+                       if (!ssid_len) {
+                               WL_ERR("skip broadcast ssid\n");
+                               continue;
+                       }
+                       pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
+                       pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
+                       pfn.wsec = cpu_to_le32(0);
+                       pfn.infra = cpu_to_le32(1);
+                       pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
+                       pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
+                       memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
+                       /*
+                        * NOTE(review): a failing "pfn_add" is only logged
+                        * below, never propagated to the caller.
+                        */
+                       ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
+                                                    &pfn, sizeof(pfn),
+                                                    iovbuf, sizeof(iovbuf));
+                       WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
+                               ret == 0 ? "set" : "failed",
+                               ssid->ssid);
+               }
+               /* Enable the PNO */
+               /*
+                * NOTE(review): on failure this prints the stale 'ret' from
+                * the last pfn_add, not the failing intvar_set result.
+                */
+               if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
+                       WL_ERR("PNO enable failed!! ret=%d\n", ret);
+                       return -EINVAL;
+               }
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * brcmf_cfg80211_sched_scan_stop() - cfg80211 callback stopping a
+ * scheduled scan: clears firmware PFN state and, if an escan was started
+ * on behalf of PNO, signals its completion (aborted). Always returns 0.
+ */
+static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+                                         struct net_device *ndev)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+
+       WL_SCAN("enter\n");
+       brcmf_dev_pno_clean(ndev);
+       if (cfg->sched_escan)
+               brcmf_notify_escan_complete(cfg, ndev, true, true);
+       return 0;
+}
+#endif /* CONFIG_BRCMISCAN */
+
+#ifdef CONFIG_NL80211_TESTMODE
+/*
+ * brcmf_cfg80211_testmode() - nl80211 testmode hook; forwards the
+ * testdata blob as a firmware dcmd and echoes the (possibly updated)
+ * dcmd back to userspace in a testmode reply.
+ */
+static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg->wdev->netdev;
+       struct brcmf_dcmd *dcmd = data;
+       struct sk_buff *reply;
+       int ret;
+
+       ret = brcmf_netlink_dcmd(ndev, dcmd);
+       if (ret == 0) {
+               /*
+                * NOTE(review): 'reply' may be NULL on allocation failure,
+                * and the nla_put() return value is ignored -- both are
+                * unchecked here.
+                */
+               reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
+               nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
+               ret = cfg80211_testmode_reply(reply);
+       }
+       return ret;
+}
+#endif
+
+/*
+ * brcmf_configure_opensecurity() - configure BSS @bssidx for open
+ * security: d11 open authentication, no wireless encryption (wsec = 0)
+ * and no upper-layer WPA auth. Returns 0 or the firmware iovar error.
+ */
+static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+{
+       s32 err;
+
+       /* set auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
+       if (err < 0) {
+               WL_ERR("auth error %d\n", err);
+               return err;
+       }
+       /* set wsec */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
+       if (err < 0) {
+               WL_ERR("wsec error %d\n", err);
+               return err;
+       }
+       /* set upper-layer auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
+                                         WPA_AUTH_NONE, bssidx);
+       if (err < 0) {
+               WL_ERR("wpa_auth error %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+/*
+ * brcmf_valid_wpa_oui() - check whether @oui matches the RSN OUI (when
+ * @is_rsn_ie) or the WPA OUI. Assumes @oui points at a buffer of at
+ * least TLV_OUI_LEN bytes -- callers must have bounds-checked already.
+ */
+static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
+{
+       if (is_rsn_ie)
+               return (memcmp(oui, RSN_OUI, TLV_OUI_LEN) == 0);
+
+       return (memcmp(oui, WPA_OUI, TLV_OUI_LEN) == 0);
+}
+
+static s32
+brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
+                    bool is_rsn_ie, s32 bssidx)
+{
+       u32 auth = 0; /* d11 open authentication */
+       u16 count;
+       s32 err = 0;
+       s32 len = 0;
+       u32 i;
+       u32 wsec;
+       u32 pval = 0;
+       u32 gval = 0;
+       u32 wpa_auth = 0;
+       u32 offset;
+       u8 *data;
+       u16 rsn_cap;
+       u32 wme_bss_disable;
+
+       WL_TRACE("Enter\n");
+       if (wpa_ie == NULL)
+               goto exit;
+
+       len = wpa_ie->len + TLV_HDR_LEN;
+       data = (u8 *)wpa_ie;
+       offset = 0;
+       if (!is_rsn_ie)
+               offset += VS_IE_FIXED_HDR_LEN;
+       offset += WPA_IE_VERSION_LEN;
+
+       /* check for multicast cipher suite */
+       if (offset + WPA_IE_MIN_OUI_LEN > len) {
+               err = -EINVAL;
+               WL_ERR("no multicast cipher suite\n");
+               goto exit;
+       }
+
+       if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+               err = -EINVAL;
+               WL_ERR("ivalid OUI\n");
+               goto exit;
+       }
+       offset += TLV_OUI_LEN;
+
+       /* pick up multicast cipher */
+       switch (data[offset]) {
+       case WPA_CIPHER_NONE:
+               gval = 0;
+               break;
+       case WPA_CIPHER_WEP_40:
+       case WPA_CIPHER_WEP_104:
+               gval = WEP_ENABLED;
+               break;
+       case WPA_CIPHER_TKIP:
+               gval = TKIP_ENABLED;
+               break;
+       case WPA_CIPHER_AES_CCM:
+               gval = AES_ENABLED;
+               break;
+       default:
+               err = -EINVAL;
+               WL_ERR("Invalid multi cast cipher info\n");
+               goto exit;
+       }
+
+       offset++;
+       /* walk thru unicast cipher list and pick up what we recognize */
+       count = data[offset] + (data[offset + 1] << 8);
+       offset += WPA_IE_SUITE_COUNT_LEN;
+       /* Check for unicast suite(s) */
+       if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+               err = -EINVAL;
+               WL_ERR("no unicast cipher suite\n");
+               goto exit;
+       }
+       for (i = 0; i < count; i++) {
+               if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+                       err = -EINVAL;
+                       WL_ERR("ivalid OUI\n");
+                       goto exit;
+               }
+               offset += TLV_OUI_LEN;
+               switch (data[offset]) {
+               case WPA_CIPHER_NONE:
+                       break;
+               case WPA_CIPHER_WEP_40:
+               case WPA_CIPHER_WEP_104:
+                       pval |= WEP_ENABLED;
+                       break;
+               case WPA_CIPHER_TKIP:
+                       pval |= TKIP_ENABLED;
+                       break;
+               case WPA_CIPHER_AES_CCM:
+                       pval |= AES_ENABLED;
+                       break;
+               default:
+                       WL_ERR("Ivalid unicast security info\n");
+               }
+               offset++;
+       }
+       /* walk thru auth management suite list and pick up what we recognize */
+       count = data[offset] + (data[offset + 1] << 8);
+       offset += WPA_IE_SUITE_COUNT_LEN;
+       /* Check for auth key management suite(s) */
+       if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+               err = -EINVAL;
+               WL_ERR("no auth key mgmt suite\n");
+               goto exit;
+       }
+       for (i = 0; i < count; i++) {
+               if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+                       err = -EINVAL;
+                       WL_ERR("ivalid OUI\n");
+                       goto exit;
+               }
+               offset += TLV_OUI_LEN;
+               switch (data[offset]) {
+               case RSN_AKM_NONE:
+                       WL_TRACE("RSN_AKM_NONE\n");
+                       wpa_auth |= WPA_AUTH_NONE;
+                       break;
+               case RSN_AKM_UNSPECIFIED:
+                       WL_TRACE("RSN_AKM_UNSPECIFIED\n");
+                       is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
+                                   (wpa_auth |= WPA_AUTH_UNSPECIFIED);
+                       break;
+               case RSN_AKM_PSK:
+                       WL_TRACE("RSN_AKM_PSK\n");
+                       is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
+                                   (wpa_auth |= WPA_AUTH_PSK);
+                       break;
+               default:
+                       WL_ERR("Ivalid key mgmt info\n");
+               }
+               offset++;
+       }
 
-static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
-{
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-
-       /*
-        * Check for WL_STATUS_READY before any function call which
-        * could result is bus access. Don't block the resume for
-        * any driver error conditions
-        */
-       WL_TRACE("Enter\n");
+       if (is_rsn_ie) {
+               wme_bss_disable = 1;
+               if ((offset + RSN_CAP_LEN) <= len) {
+                       rsn_cap = data[offset] + (data[offset + 1] << 8);
+                       if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
+                               wme_bss_disable = 0;
+               }
+               /* set wme_bss_disable to sync RSN Capabilities */
+               err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
+                                                 wme_bss_disable, bssidx);
+               if (err < 0) {
+                       WL_ERR("wme_bss_disable error %d\n", err);
+                       goto exit;
+               }
+       }
+       /* FOR WPS , set SES_OW_ENABLED */
+       wsec = (pval | gval | SES_OW_ENABLED);
 
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status))
-               brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
+       /* set auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
+       if (err < 0) {
+               WL_ERR("auth error %d\n", err);
+               goto exit;
+       }
+       /* set wsec */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+       if (err < 0) {
+               WL_ERR("wsec error %d\n", err);
+               goto exit;
+       }
+       /* set upper-layer auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
+       if (err < 0) {
+               WL_ERR("wpa_auth error %d\n", err);
+               goto exit;
+       }
 
-       WL_TRACE("Exit\n");
-       return 0;
+exit:
+       return err;
 }
 
-static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
-                                 struct cfg80211_wowlan *wow)
+static s32
+brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
+                    struct parsed_vndr_ies *vndr_ies)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
-
-       WL_TRACE("Enter\n");
+       s32 err = 0;
+       struct brcmf_vs_tlv *vndrie;
+       struct brcmf_tlv *ie;
+       struct parsed_vndr_ie_info *parsed_info;
+       s32 remaining_len;
+
+       remaining_len = (s32)vndr_ie_len;
+       memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+       ie = (struct brcmf_tlv *)vndr_ie_buf;
+       while (ie) {
+               if (ie->id != WLAN_EID_VENDOR_SPECIFIC)
+                       goto next;
+               vndrie = (struct brcmf_vs_tlv *)ie;
+               /* len should be bigger than OUI length + one */
+               if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
+                       WL_ERR("invalid vndr ie. length is too small %d\n",
+                               vndrie->len);
+                       goto next;
+               }
+               /* if wpa or wme ie, do not add ie */
+               if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
+                   ((vndrie->oui_type == WPA_OUI_TYPE) ||
+                   (vndrie->oui_type == WME_OUI_TYPE))) {
+                       WL_TRACE("Found WPA/WME oui. Do not add it\n");
+                       goto next;
+               }
 
-       /*
-        * Check for WL_STATUS_READY before any function call which
-        * could result is bus access. Don't block the suspend for
-        * any driver error conditions
-        */
+               parsed_info = &vndr_ies->ie_info[vndr_ies->count];
 
-       /*
-        * While going to suspend if associated with AP disassociate
-        * from AP to save power while system is in suspended state
-        */
-       if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
-            test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
-            test_bit(WL_STATUS_READY, &cfg_priv->status)) {
-               WL_INFO("Disassociating from AP"
-                       " while entering suspend state\n");
-               brcmf_link_down(cfg_priv);
+               /* save vndr ie information */
+               parsed_info->ie_ptr = (char *)vndrie;
+               parsed_info->ie_len = vndrie->len + TLV_HDR_LEN;
+               memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie));
 
-               /*
-                * Make sure WPA_Supplicant receives all the event
-                * generated due to DISASSOC call to the fw to keep
-                * the state fw and WPA_Supplicant state consistent
-                */
-               brcmf_delay(500);
-       }
+               vndr_ies->count++;
 
-       set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status))
-               brcmf_term_iscan(cfg_priv);
+               WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n",
+                        parsed_info->vndrie.oui[0],
+                        parsed_info->vndrie.oui[1],
+                        parsed_info->vndrie.oui[2],
+                        parsed_info->vndrie.oui_type);
 
-       if (cfg_priv->scan_request) {
-               /* Indidate scan abort to cfg80211 layer */
-               WL_INFO("Terminating scan in progress\n");
-               cfg80211_scan_done(cfg_priv->scan_request, true);
-               cfg_priv->scan_request = NULL;
+               if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+                       break;
+next:
+               remaining_len -= ie->len;
+               if (remaining_len <= 2)
+                       ie = NULL;
+               else
+                       ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len);
        }
-       clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
-       clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+       return err;
+}
 
-       /* Turn off watchdog timer */
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
-               WL_INFO("Enable MPC\n");
-               brcmf_set_mpc(ndev, 1);
-       }
+static u32
+brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
+{
 
-       WL_TRACE("Exit\n");
+       __le32 iecount_le;
+       __le32 pktflag_le;
 
-       return 0;
-}
+       strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+       iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
 
-static __used s32
-brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
-{
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
-       u32 buflen;
+       iecount_le = cpu_to_le32(1);
+       memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
 
-       buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf,
-                              WL_DCMD_LEN_MAX);
-       BUG_ON(!buflen);
+       pktflag_le = cpu_to_le32(pktflag);
+       memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
 
-       return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf,
-                              buflen);
+       memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
+
+       return ie_len + VNDR_IE_HDR_SIZE;
 }
 
-static s32
-brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
-                 s32 buf_len)
+s32
+brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
+                       struct net_device *ndev, s32 bssidx, s32 pktflag,
+                       u8 *vndr_ie_buf, u32 vndr_ie_len)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
-       u32 len;
        s32 err = 0;
+       u8  *iovar_ie_buf;
+       u8  *curr_ie_buf;
+       u8  *mgmt_ie_buf = NULL;
+       u32 mgmt_ie_buf_len = 0;
+       u32 *mgmt_ie_len = 0;
+       u32 del_add_ie_buf_len = 0;
+       u32 total_ie_buf_len = 0;
+       u32 parsed_ie_buf_len = 0;
+       struct parsed_vndr_ies old_vndr_ies;
+       struct parsed_vndr_ies new_vndr_ies;
+       struct parsed_vndr_ie_info *vndrie_info;
+       s32 i;
+       u8 *ptr;
+       u32 remained_buf_len;
+
+       WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
+       iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+       if (!iovar_ie_buf)
+               return -ENOMEM;
+       curr_ie_buf = iovar_ie_buf;
+       if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
+           test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
+               switch (pktflag) {
+               case VNDR_IE_PRBRSP_FLAG:
+                       mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+                       mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+                       mgmt_ie_buf_len =
+                               sizeof(cfg->ap_info->probe_res_ie);
+                       break;
+               case VNDR_IE_BEACON_FLAG:
+                       mgmt_ie_buf = cfg->ap_info->beacon_ie;
+                       mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+                       mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+                       break;
+               default:
+                       err = -EPERM;
+                       WL_ERR("not suitable type\n");
+                       goto exit;
+               }
+               bssidx = 0;
+       } else {
+               err = -EPERM;
+               WL_ERR("not suitable type\n");
+               goto exit;
+       }
 
-       len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf,
-                           WL_DCMD_LEN_MAX);
-       BUG_ON(!len);
-       err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf,
-                             WL_DCMD_LEN_MAX);
-       if (err) {
-               WL_ERR("error (%d)\n", err);
-               return err;
+       if (vndr_ie_len > mgmt_ie_buf_len) {
+               err = -ENOMEM;
+               WL_ERR("extra IE size too big\n");
+               goto exit;
+       }
+
+       /* parse and save new vndr_ie in curr_ie_buff before comparing it */
+       if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) {
+               ptr = curr_ie_buf;
+               brcmf_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies);
+               for (i = 0; i < new_vndr_ies.count; i++) {
+                       vndrie_info = &new_vndr_ies.ie_info[i];
+                       memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+                              vndrie_info->ie_len);
+                       parsed_ie_buf_len += vndrie_info->ie_len;
+               }
        }
-       memcpy(buf, cfg_priv->dcmd_buf, buf_len);
 
-       return err;
-}
+       if (mgmt_ie_buf != NULL) {
+               if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+                   (memcmp(mgmt_ie_buf, curr_ie_buf,
+                           parsed_ie_buf_len) == 0)) {
+                       WL_TRACE("Previous mgmt IE is equals to current IE");
+                       goto exit;
+               }
 
-static __used s32
-brcmf_update_pmklist(struct net_device *ndev,
-                    struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
-{
-       int i, j;
-       int pmkid_len;
+               /* parse old vndr_ie */
+               brcmf_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies);
+
+               /* make a command to delete old ie */
+               for (i = 0; i < old_vndr_ies.count; i++) {
+                       vndrie_info = &old_vndr_ies.ie_info[i];
+
+                       WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+                                vndrie_info->vndrie.id,
+                                vndrie_info->vndrie.len,
+                                vndrie_info->vndrie.oui[0],
+                                vndrie_info->vndrie.oui[1],
+                                vndrie_info->vndrie.oui[2]);
+
+                       del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+                                                          vndrie_info->ie_ptr,
+                                                          vndrie_info->ie_len,
+                                                          "del");
+                       curr_ie_buf += del_add_ie_buf_len;
+                       total_ie_buf_len += del_add_ie_buf_len;
+               }
+       }
 
-       pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
+       *mgmt_ie_len = 0;
+       /* Add if there is any extra IE */
+       if (mgmt_ie_buf && parsed_ie_buf_len) {
+               ptr = mgmt_ie_buf;
+
+               remained_buf_len = mgmt_ie_buf_len;
+
+               /* make a command to add new ie */
+               for (i = 0; i < new_vndr_ies.count; i++) {
+                       vndrie_info = &new_vndr_ies.ie_info[i];
+
+                       WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
+                                vndrie_info->vndrie.id,
+                                vndrie_info->vndrie.len,
+                                vndrie_info->vndrie.oui[0],
+                                vndrie_info->vndrie.oui[1],
+                                vndrie_info->vndrie.oui[2]);
+
+                       del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+                                                          vndrie_info->ie_ptr,
+                                                          vndrie_info->ie_len,
+                                                          "add");
+                       /* verify remained buf size before copy data */
+                       remained_buf_len -= vndrie_info->ie_len;
+                       if (remained_buf_len < 0) {
+                               WL_ERR("no space in mgmt_ie_buf: len left %d",
+                                       remained_buf_len);
+                               break;
+                       }
 
-       WL_CONN("No of elements %d\n", pmkid_len);
-       for (i = 0; i < pmkid_len; i++) {
-               WL_CONN("PMKID[%d]: %pM =\n", i,
-                       &pmk_list->pmkids.pmkid[i].BSSID);
-               for (j = 0; j < WLAN_PMKID_LEN; j++)
-                       WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
-       }
+                       /* save the parsed IE in wl struct */
+                       memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+                              vndrie_info->ie_len);
+                       *mgmt_ie_len += vndrie_info->ie_len;
 
-       if (!err)
-               brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
-                                       sizeof(*pmk_list));
+                       curr_ie_buf += del_add_ie_buf_len;
+                       total_ie_buf_len += del_add_ie_buf_len;
+               }
+       }
+       if (total_ie_buf_len) {
+               err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
+                                                    iovar_ie_buf,
+                                                    total_ie_buf_len,
+                                                    cfg->extra_buf,
+                                                    WL_EXTRA_BUF_MAX, bssidx);
+               if (err)
+                       WL_ERR("vndr ie set error : %d\n", err);
+       }
 
+exit:
+       kfree(iovar_ie_buf);
        return err;
 }
 
 static s32
-brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
-                        struct cfg80211_pmksa *pmksa)
+brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+                       struct cfg80211_ap_settings *settings)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids;
-       s32 err = 0;
-       int i;
-       int pmkid_len;
+       s32 ie_offset;
+       struct brcmf_tlv *ssid_ie;
+       struct brcmf_ssid_le ssid_le;
+       s32 ioctl_value;
+       s32 err = -EPERM;
+       struct brcmf_tlv *rsn_ie;
+       struct brcmf_vs_tlv *wpa_ie;
+       struct brcmf_join_params join_params;
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       s32 bssidx = 0;
+
+       WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
+                settings->channel_type, settings->beacon_interval,
+                settings->dtim_period);
+       WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
+                settings->ssid, settings->ssid_len, settings->auth_type,
+                settings->inactivity_timeout);
+
+       if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
+               WL_ERR("Not in AP creation mode\n");
+               return -EPERM;
+       }
+
+       memset(&ssid_le, 0, sizeof(ssid_le));
+       if (settings->ssid == NULL || settings->ssid_len == 0) {
+               ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+               ssid_ie = brcmf_parse_tlvs(
+                               (u8 *)&settings->beacon.head[ie_offset],
+                               settings->beacon.head_len - ie_offset,
+                               WLAN_EID_SSID);
+               if (!ssid_ie)
+                       return -EINVAL;
 
-       WL_TRACE("Enter\n");
-       if (!check_sys_up(wiphy))
-               return -EIO;
+               memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
+               ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
+               WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID);
+       } else {
+               memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
+               ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
+       }
 
-       pmkid_len = le32_to_cpu(pmkids->npmkid);
-       for (i = 0; i < pmkid_len; i++)
-               if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
-                       break;
-       if (i < WL_NUM_PMKIDS_MAX) {
-               memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
-               memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
-               if (i == pmkid_len) {
-                       pmkid_len++;
-                       pmkids->npmkid = cpu_to_le32(pmkid_len);
+       brcmf_set_mpc(ndev, 0);
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("BRCMF_C_DOWN error %d\n", err);
+               goto exit;
+       }
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("SET INFRA error %d\n", err);
+               goto exit;
+       }
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("setting AP mode failed %d\n", err);
+               goto exit;
+       }
+
+       /* find the RSN_IE */
+       rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+                                 settings->beacon.tail_len, WLAN_EID_RSN);
+
+       /* find the WPA_IE */
+       wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
+                                 settings->beacon.tail_len);
+
+       kfree(cfg->ap_info->rsn_ie);
+       cfg->ap_info->rsn_ie = NULL;
+       kfree(cfg->ap_info->wpa_ie);
+       cfg->ap_info->wpa_ie = NULL;
+
+       if ((wpa_ie != NULL || rsn_ie != NULL)) {
+               WL_TRACE("WPA(2) IE is found\n");
+               if (wpa_ie != NULL) {
+                       /* WPA IE */
+                       err = brcmf_configure_wpaie(ndev, wpa_ie, false,
+                                                   bssidx);
+                       if (err < 0)
+                               goto exit;
+                       cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
+                                                           wpa_ie->len +
+                                                           TLV_HDR_LEN,
+                                                           GFP_KERNEL);
+               } else {
+                       /* RSN IE */
+                       err = brcmf_configure_wpaie(ndev,
+                               (struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
+                       if (err < 0)
+                               goto exit;
+                       cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
+                                                           rsn_ie->len +
+                                                           TLV_HDR_LEN,
+                                                           GFP_KERNEL);
                }
-       } else
-               err = -EINVAL;
+               cfg->ap_info->security_mode = true;
+       } else {
+               WL_TRACE("No WPA(2) IEs found\n");
+               brcmf_configure_opensecurity(ndev, bssidx);
+               cfg->ap_info->security_mode = false;
+       }
+       /* Set Beacon IEs to FW */
+       err = brcmf_set_management_ie(cfg, ndev, bssidx,
+                                     VNDR_IE_BEACON_FLAG,
+                                     (u8 *)settings->beacon.tail,
+                                     settings->beacon.tail_len);
+       if (err)
+               WL_ERR("Set Beacon IE Failed\n");
+       else
+               WL_TRACE("Applied Vndr IEs for Beacon\n");
 
-       WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
-               pmkids->pmkid[pmkid_len].BSSID);
-       for (i = 0; i < WLAN_PMKID_LEN; i++)
-               WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+       /* Set Probe Response IEs to FW */
+       err = brcmf_set_management_ie(cfg, ndev, bssidx,
+                                     VNDR_IE_PRBRSP_FLAG,
+                                     (u8 *)settings->beacon.proberesp_ies,
+                                     settings->beacon.proberesp_ies_len);
+       if (err)
+               WL_ERR("Set Probe Resp IE Failed\n");
+       else
+               WL_TRACE("Applied Vndr IEs for Probe Resp\n");
+
+       if (settings->beacon_interval) {
+               ioctl_value = settings->beacon_interval;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
+                                         &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("Beacon Interval Set Error, %d\n", err);
+                       goto exit;
+               }
+       }
+       if (settings->dtim_period) {
+               ioctl_value = settings->dtim_period;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
+                                         &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("DTIM Interval Set Error, %d\n", err);
+                       goto exit;
+               }
+       }
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("BRCMF_C_UP error (%d)\n", err);
+               goto exit;
+       }
 
-       err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+       memset(&join_params, 0, sizeof(join_params));
+       /* join parameters starts with ssid */
+       memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+       /* create softap */
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
+                             sizeof(join_params));
+       if (err < 0) {
+               WL_ERR("SET SSID error (%d)\n", err);
+               goto exit;
+       }
+       clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
+       set_bit(WL_STATUS_AP_CREATED, &cfg->status);
 
-       WL_TRACE("Exit\n");
+exit:
+       if (err)
+               brcmf_set_mpc(ndev, 1);
        return err;
 }
 
-static s32
-brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
-                     struct cfg80211_pmksa *pmksa)
+static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct pmkid_list pmkid;
-       s32 err = 0;
-       int i, pmkid_len;
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       s32 ioctl_value;
+       s32 err = -EPERM;
 
        WL_TRACE("Enter\n");
-       if (!check_sys_up(wiphy))
-               return -EIO;
-
-       memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
-       memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
-
-       WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
-              &pmkid.pmkid[0].BSSID);
-       for (i = 0; i < WLAN_PMKID_LEN; i++)
-               WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
-
-       pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid);
-       for (i = 0; i < pmkid_len; i++)
-               if (!memcmp
-                   (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
-                    ETH_ALEN))
-                       break;
 
-       if ((pmkid_len > 0)
-           && (i < pmkid_len)) {
-               memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0,
-                      sizeof(struct pmkid));
-               for (; i < (pmkid_len - 1); i++) {
-                       memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
-                              &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID,
-                              ETH_ALEN);
-                       memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID,
-                              &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID,
-                              WLAN_PMKID_LEN);
+       if (cfg->conf->mode == WL_MODE_AP) {
+               /* Due to most likely deauths outstanding we sleep */
+               /* first to make sure they get processed by fw. */
+               msleep(400);
+               ioctl_value = 0;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("setting AP mode failed %d\n", err);
+                       goto exit;
                }
-               cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
-       } else
-               err = -EINVAL;
-
-       err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
-
-       WL_TRACE("Exit\n");
+               ioctl_value = 0;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("BRCMF_C_UP error %d\n", err);
+                       goto exit;
+               }
+               brcmf_set_mpc(ndev, 1);
+               clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
+               clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
+       }
+exit:
        return err;
-
 }
 
-static s32
-brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
+static int
+brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
+                          u8 *mac)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       s32 err = 0;
+       struct brcmf_scb_val_le scbval;
+       s32 err;
+
+       if (!mac)
+               return -EFAULT;
+
+       WL_TRACE("Enter %pM\n", mac);
 
-       WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list));
-       err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+       memcpy(&scbval.ea, mac, ETH_ALEN);
+       scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+                             &scbval, sizeof(scbval));
+       if (err)
+               WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
 
        WL_TRACE("Exit\n");
        return err;
-
 }
 
 static struct cfg80211_ops wl_cfg80211_ops = {
@@ -2748,7 +4369,18 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        .resume = brcmf_cfg80211_resume,
        .set_pmksa = brcmf_cfg80211_set_pmksa,
        .del_pmksa = brcmf_cfg80211_del_pmksa,
-       .flush_pmksa = brcmf_cfg80211_flush_pmksa
+       .flush_pmksa = brcmf_cfg80211_flush_pmksa,
+       .start_ap = brcmf_cfg80211_start_ap,
+       .stop_ap = brcmf_cfg80211_stop_ap,
+       .del_station = brcmf_cfg80211_del_station,
+#ifndef CONFIG_BRCMISCAN
+       /* scheduled scan need e-scan, which is mutual exclusive with i-scan */
+       .sched_scan_start = brcmf_cfg80211_sched_scan_start,
+       .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+#endif
+#ifdef CONFIG_NL80211_TESTMODE
+       .testmode_cmd = brcmf_cfg80211_testmode
+#endif
 };
 
 static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
@@ -2767,8 +4399,18 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
        return err;
 }
 
-static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
-                                         struct device *ndev)
+static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
+{
+#ifndef CONFIG_BRCMFISCAN
+       /* scheduled scan settings */
+       wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
+       wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
+       wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+       wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif
+}
+
+static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
 {
        struct wireless_dev *wdev;
        s32 err = 0;
@@ -2777,9 +4419,8 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
        if (!wdev)
                return ERR_PTR(-ENOMEM);
 
-       wdev->wiphy =
-           wiphy_new(&wl_cfg80211_ops,
-                     sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
+       wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
+                               sizeof(struct brcmf_cfg80211_info));
        if (!wdev->wiphy) {
                WL_ERR("Could not allocate wiphy device\n");
                err = -ENOMEM;
@@ -2788,8 +4429,9 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
        set_wiphy_dev(wdev->wiphy, ndev);
        wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
        wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
-       wdev->wiphy->interface_modes =
-           BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+       wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                      BIT(NL80211_IFTYPE_ADHOC) |
+                                      BIT(NL80211_IFTYPE_AP);
        wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
        wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;    /* Set
                                                * it as 11a by default.
@@ -2805,6 +4447,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
                                                                 * save mode
                                                                 * by default
                                                                 */
+       brcmf_wiphy_pno_params(wdev->wiphy);
        err = wiphy_register(wdev->wiphy);
        if (err < 0) {
                WL_ERR("Could not register wiphy device (%d)\n", err);
@@ -2821,9 +4464,9 @@ wiphy_new_out:
        return ERR_PTR(err);
 }
 
-static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
 {
-       struct wireless_dev *wdev = cfg_priv->wdev;
+       struct wireless_dev *wdev = cfg->wdev;
 
        if (!wdev) {
                WL_ERR("wdev is invalid\n");
@@ -2832,10 +4475,10 @@ static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
        wiphy_unregister(wdev->wiphy);
        wiphy_free(wdev->wiphy);
        kfree(wdev);
-       cfg_priv->wdev = NULL;
+       cfg->wdev = NULL;
 }
 
-static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
                            const struct brcmf_event_msg *e)
 {
        u32 event = be32_to_cpu(e->event_type);
@@ -2843,14 +4486,14 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
 
        if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
                WL_CONN("Processing set ssid\n");
-               cfg_priv->link_up = true;
+               cfg->link_up = true;
                return true;
        }
 
        return false;
 }
 
-static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
                              const struct brcmf_event_msg *e)
 {
        u32 event = be32_to_cpu(e->event_type);
@@ -2863,7 +4506,7 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
        return false;
 }
 
-static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
                               const struct brcmf_event_msg *e)
 {
        u32 event = be32_to_cpu(e->event_type);
@@ -2884,9 +4527,9 @@ static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
        return false;
 }
 
-static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
 
        kfree(conn_info->req_ie);
        conn_info->req_ie = NULL;
@@ -2896,30 +4539,30 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
        conn_info->resp_ie_len = 0;
 }
 
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
 {
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
        u32 req_len;
        u32 resp_len;
        s32 err = 0;
 
-       brcmf_clear_assoc_ies(cfg_priv);
+       brcmf_clear_assoc_ies(cfg);
 
-       err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf,
+       err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
                                WL_ASSOC_INFO_MAX);
        if (err) {
                WL_ERR("could not get assoc info (%d)\n", err);
                return err;
        }
        assoc_info =
-               (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf;
+               (struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
        req_len = le32_to_cpu(assoc_info->req_len);
        resp_len = le32_to_cpu(assoc_info->resp_len);
        if (req_len) {
                err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
-                                          cfg_priv->extra_buf,
+                                          cfg->extra_buf,
                                           WL_ASSOC_INFO_MAX);
                if (err) {
                        WL_ERR("could not get assoc req (%d)\n", err);
@@ -2927,7 +4570,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
                }
                conn_info->req_ie_len = req_len;
                conn_info->req_ie =
-                   kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len,
+                   kmemdup(cfg->extra_buf, conn_info->req_ie_len,
                            GFP_KERNEL);
        } else {
                conn_info->req_ie_len = 0;
@@ -2935,7 +4578,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
        }
        if (resp_len) {
                err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
-                                          cfg_priv->extra_buf,
+                                          cfg->extra_buf,
                                           WL_ASSOC_INFO_MAX);
                if (err) {
                        WL_ERR("could not get assoc resp (%d)\n", err);
@@ -2943,7 +4586,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
                }
                conn_info->resp_ie_len = resp_len;
                conn_info->resp_ie =
-                   kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len,
+                   kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
                            GFP_KERNEL);
        } else {
                conn_info->resp_ie_len = 0;
@@ -2956,12 +4599,13 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
 }
 
 static s32
-brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
                       struct net_device *ndev,
                       const struct brcmf_event_msg *e)
 {
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
-       struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
        struct brcmf_channel_info_le channel_le;
        struct ieee80211_channel *notify_channel;
        struct ieee80211_supported_band *band;
@@ -2971,9 +4615,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
 
        WL_TRACE("Enter\n");
 
-       brcmf_get_assoc_ies(cfg_priv);
-       brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID);
-       brcmf_update_bss_info(cfg_priv);
+       brcmf_get_assoc_ies(cfg);
+       memcpy(profile->bssid, e->addr, ETH_ALEN);
+       brcmf_update_bss_info(cfg);
 
        brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
                        sizeof(channel_le));
@@ -2989,37 +4633,35 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
        freq = ieee80211_channel_to_frequency(target_channel, band->band);
        notify_channel = ieee80211_get_channel(wiphy, freq);
 
-       cfg80211_roamed(ndev, notify_channel,
-                       (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
+       cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
                        conn_info->req_ie, conn_info->req_ie_len,
                        conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
        WL_CONN("Report roaming result\n");
 
-       set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+       set_bit(WL_STATUS_CONNECTED, &cfg->status);
        WL_TRACE("Exit\n");
        return err;
 }
 
 static s32
-brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
                       struct net_device *ndev, const struct brcmf_event_msg *e,
                       bool completed)
 {
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
 
-       if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+       if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
                if (completed) {
-                       brcmf_get_assoc_ies(cfg_priv);
-                       brcmf_update_prof(cfg_priv, NULL, &e->addr,
-                                         WL_PROF_BSSID);
-                       brcmf_update_bss_info(cfg_priv);
+                       brcmf_get_assoc_ies(cfg);
+                       memcpy(profile->bssid, e->addr, ETH_ALEN);
+                       brcmf_update_bss_info(cfg);
                }
                cfg80211_connect_result(ndev,
-                                       (u8 *)brcmf_read_prof(cfg_priv,
-                                                             WL_PROF_BSSID),
+                                       (u8 *)profile->bssid,
                                        conn_info->req_ie,
                                        conn_info->req_ie_len,
                                        conn_info->resp_ie,
@@ -3028,7 +4670,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
                                                    WLAN_STATUS_AUTH_TIMEOUT,
                                        GFP_KERNEL);
                if (completed)
-                       set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+                       set_bit(WL_STATUS_CONNECTED, &cfg->status);
                WL_CONN("Report connect result - connection %s\n",
                                completed ? "succeeded" : "failed");
        }
@@ -3037,52 +4679,93 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
 }
 
 static s32
-brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
+                              struct net_device *ndev,
+                              const struct brcmf_event_msg *e, void *data)
+{
+       s32 err = 0;
+       u32 event = be32_to_cpu(e->event_type);
+       u32 reason = be32_to_cpu(e->reason);
+       u32 len = be32_to_cpu(e->datalen);
+       static int generation;
+
+       struct station_info sinfo;
+
+       WL_CONN("event %d, reason %d\n", event, reason);
+       memset(&sinfo, 0, sizeof(sinfo));
+
+       sinfo.filled = 0;
+       if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
+           reason == BRCMF_E_STATUS_SUCCESS) {
+               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+               if (!data) {
+                       WL_ERR("No IEs present in ASSOC/REASSOC_IND");
+                       return -EINVAL;
+               }
+               sinfo.assoc_req_ies = data;
+               sinfo.assoc_req_ies_len = len;
+               generation++;
+               sinfo.generation = generation;
+               cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+       } else if ((event == BRCMF_E_DISASSOC_IND) ||
+                  (event == BRCMF_E_DEAUTH_IND) ||
+                  (event == BRCMF_E_DEAUTH)) {
+               generation++;
+               sinfo.generation = generation;
+               cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+       }
+       return err;
+}
+
+static s32
+brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
                            struct net_device *ndev,
                            const struct brcmf_event_msg *e, void *data)
 {
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        s32 err = 0;
 
-       if (brcmf_is_linkup(cfg_priv, e)) {
+       if (cfg->conf->mode == WL_MODE_AP) {
+               err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
+       } else if (brcmf_is_linkup(cfg, e)) {
                WL_CONN("Linkup\n");
-               if (brcmf_is_ibssmode(cfg_priv)) {
-                       brcmf_update_prof(cfg_priv, NULL, (void *)e->addr,
-                               WL_PROF_BSSID);
-                       wl_inform_ibss(cfg_priv, ndev, e->addr);
+               if (brcmf_is_ibssmode(cfg)) {
+                       memcpy(profile->bssid, e->addr, ETH_ALEN);
+                       wl_inform_ibss(cfg, ndev, e->addr);
                        cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
-                       clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
-                       set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+                       clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+                       set_bit(WL_STATUS_CONNECTED, &cfg->status);
                } else
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, true);
-       } else if (brcmf_is_linkdown(cfg_priv, e)) {
+                       brcmf_bss_connect_done(cfg, ndev, e, true);
+       } else if (brcmf_is_linkdown(cfg, e)) {
                WL_CONN("Linkdown\n");
-               if (brcmf_is_ibssmode(cfg_priv)) {
-                       clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               if (brcmf_is_ibssmode(cfg)) {
+                       clear_bit(WL_STATUS_CONNECTING, &cfg->status);
                        if (test_and_clear_bit(WL_STATUS_CONNECTED,
-                               &cfg_priv->status))
-                               brcmf_link_down(cfg_priv);
+                               &cfg->status))
+                               brcmf_link_down(cfg);
                } else {
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+                       brcmf_bss_connect_done(cfg, ndev, e, false);
                        if (test_and_clear_bit(WL_STATUS_CONNECTED,
-                               &cfg_priv->status)) {
+                               &cfg->status)) {
                                cfg80211_disconnected(ndev, 0, NULL, 0,
                                        GFP_KERNEL);
-                               brcmf_link_down(cfg_priv);
+                               brcmf_link_down(cfg);
                        }
                }
-               brcmf_init_prof(cfg_priv->profile);
-       } else if (brcmf_is_nonetwork(cfg_priv, e)) {
-               if (brcmf_is_ibssmode(cfg_priv))
-                       clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               brcmf_init_prof(cfg->profile);
+       } else if (brcmf_is_nonetwork(cfg, e)) {
+               if (brcmf_is_ibssmode(cfg))
+                       clear_bit(WL_STATUS_CONNECTING, &cfg->status);
                else
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+                       brcmf_bss_connect_done(cfg, ndev, e, false);
        }
 
        return err;
 }
 
 static s32
-brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
                            struct net_device *ndev,
                            const struct brcmf_event_msg *e, void *data)
 {
@@ -3091,17 +4774,17 @@ brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
        u32 status = be32_to_cpu(e->status);
 
        if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
-               if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status))
-                       brcmf_bss_roaming_done(cfg_priv, ndev, e);
+               if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
+                       brcmf_bss_roaming_done(cfg, ndev, e);
                else
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, true);
+                       brcmf_bss_connect_done(cfg, ndev, e, true);
        }
 
        return err;
 }
 
 static s32
-brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
                        struct net_device *ndev,
                        const struct brcmf_event_msg *e, void *data)
 {
@@ -3120,7 +4803,7 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
 }
 
 static s32
-brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
                         struct net_device *ndev,
                         const struct brcmf_event_msg *e, void *data)
 {
@@ -3133,12 +4816,12 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
 
        WL_TRACE("Enter\n");
 
-       if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) {
+       if (cfg->iscan_on && cfg->iscan_kickstart) {
                WL_TRACE("Exit\n");
-               return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv));
+               return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
        }
 
-       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
                WL_ERR("Scan complete while device not scanning\n");
                scan_abort = true;
                err = -EINVAL;
@@ -3155,35 +4838,33 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
        scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
        if (scan_channel)
                WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
-       cfg_priv->bss_list = cfg_priv->scan_results;
-       bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list;
+       cfg->bss_list = cfg->scan_results;
+       bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
 
-       memset(cfg_priv->scan_results, 0, len);
+       memset(cfg->scan_results, 0, len);
        bss_list_le->buflen = cpu_to_le32(len);
        err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
-                             cfg_priv->scan_results, len);
+                             cfg->scan_results, len);
        if (err) {
                WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
                err = -EINVAL;
                scan_abort = true;
                goto scan_done_out;
        }
-       cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
-       cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version);
-       cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count);
+       cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
+       cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
+       cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
 
-       err = brcmf_inform_bss(cfg_priv);
-       if (err) {
+       err = brcmf_inform_bss(cfg);
+       if (err)
                scan_abort = true;
-               goto scan_done_out;
-       }
 
 scan_done_out:
-       if (cfg_priv->scan_request) {
+       if (cfg->scan_request) {
                WL_SCAN("calling cfg80211_scan_done\n");
-               cfg80211_scan_done(cfg_priv->scan_request, scan_abort);
+               cfg80211_scan_done(cfg->scan_request, scan_abort);
                brcmf_set_mpc(ndev, 1);
-               cfg_priv->scan_request = NULL;
+               cfg->scan_request = NULL;
        }
 
        WL_TRACE("Exit\n");
@@ -3206,68 +4887,85 @@ static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
        memset(el, 0, sizeof(*el));
        el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
        el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
        el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
        el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
        el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
+}
+
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+       kfree(cfg->scan_results);
+       cfg->scan_results = NULL;
+       kfree(cfg->bss_info);
+       cfg->bss_info = NULL;
+       kfree(cfg->conf);
+       cfg->conf = NULL;
+       kfree(cfg->profile);
+       cfg->profile = NULL;
+       kfree(cfg->scan_req_int);
+       cfg->scan_req_int = NULL;
+       kfree(cfg->escan_ioctl_buf);
+       cfg->escan_ioctl_buf = NULL;
+       kfree(cfg->dcmd_buf);
+       cfg->dcmd_buf = NULL;
+       kfree(cfg->extra_buf);
+       cfg->extra_buf = NULL;
+       kfree(cfg->iscan);
+       cfg->iscan = NULL;
+       kfree(cfg->pmk_list);
+       cfg->pmk_list = NULL;
+       if (cfg->ap_info) {
+               kfree(cfg->ap_info->wpa_ie);
+               kfree(cfg->ap_info->rsn_ie);
+               kfree(cfg->ap_info);
+               cfg->ap_info = NULL;
+       }
 }
 
-static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
-       kfree(cfg_priv->scan_results);
-       cfg_priv->scan_results = NULL;
-       kfree(cfg_priv->bss_info);
-       cfg_priv->bss_info = NULL;
-       kfree(cfg_priv->conf);
-       cfg_priv->conf = NULL;
-       kfree(cfg_priv->profile);
-       cfg_priv->profile = NULL;
-       kfree(cfg_priv->scan_req_int);
-       cfg_priv->scan_req_int = NULL;
-       kfree(cfg_priv->dcmd_buf);
-       cfg_priv->dcmd_buf = NULL;
-       kfree(cfg_priv->extra_buf);
-       cfg_priv->extra_buf = NULL;
-       kfree(cfg_priv->iscan);
-       cfg_priv->iscan = NULL;
-       kfree(cfg_priv->pmk_list);
-       cfg_priv->pmk_list = NULL;
-}
-
-static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
-       cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
-       if (!cfg_priv->scan_results)
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+       cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+       if (!cfg->scan_results)
                goto init_priv_mem_out;
-       cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL);
-       if (!cfg_priv->conf)
+       cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+       if (!cfg->conf)
                goto init_priv_mem_out;
-       cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL);
-       if (!cfg_priv->profile)
+       cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
+       if (!cfg->profile)
                goto init_priv_mem_out;
-       cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
-       if (!cfg_priv->bss_info)
+       cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+       if (!cfg->bss_info)
                goto init_priv_mem_out;
-       cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int),
+       cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
                                         GFP_KERNEL);
-       if (!cfg_priv->scan_req_int)
+       if (!cfg->scan_req_int)
+               goto init_priv_mem_out;
+       cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+       if (!cfg->escan_ioctl_buf)
                goto init_priv_mem_out;
-       cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
-       if (!cfg_priv->dcmd_buf)
+       cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
+       if (!cfg->dcmd_buf)
                goto init_priv_mem_out;
-       cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
-       if (!cfg_priv->extra_buf)
+       cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+       if (!cfg->extra_buf)
                goto init_priv_mem_out;
-       cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL);
-       if (!cfg_priv->iscan)
+       cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
+       if (!cfg->iscan)
                goto init_priv_mem_out;
-       cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL);
-       if (!cfg_priv->pmk_list)
+       cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+       if (!cfg->pmk_list)
                goto init_priv_mem_out;
 
        return 0;
 
 init_priv_mem_out:
-       brcmf_deinit_priv_mem(cfg_priv);
+       brcmf_deinit_priv_mem(cfg);
 
        return -ENOMEM;
 }
@@ -3277,17 +4975,17 @@ init_priv_mem_out:
 */
 
 static struct brcmf_cfg80211_event_q *brcmf_deq_event(
-       struct brcmf_cfg80211_priv *cfg_priv)
+       struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_cfg80211_event_q *e = NULL;
 
-       spin_lock_irq(&cfg_priv->evt_q_lock);
-       if (!list_empty(&cfg_priv->evt_q_list)) {
-               e = list_first_entry(&cfg_priv->evt_q_list,
+       spin_lock_irq(&cfg->evt_q_lock);
+       if (!list_empty(&cfg->evt_q_list)) {
+               e = list_first_entry(&cfg->evt_q_list,
                                     struct brcmf_cfg80211_event_q, evt_q_list);
                list_del(&e->evt_q_list);
        }
-       spin_unlock_irq(&cfg_priv->evt_q_lock);
+       spin_unlock_irq(&cfg->evt_q_lock);
 
        return e;
 }
@@ -3299,23 +4997,33 @@ static struct brcmf_cfg80211_event_q *brcmf_deq_event(
 */
 
 static s32
-brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
-               const struct brcmf_event_msg *msg)
+brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
+               const struct brcmf_event_msg *msg, void *data)
 {
        struct brcmf_cfg80211_event_q *e;
        s32 err = 0;
        ulong flags;
+       u32 data_len;
+       u32 total_len;
 
-       e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC);
+       total_len = sizeof(struct brcmf_cfg80211_event_q);
+       if (data)
+               data_len = be32_to_cpu(msg->datalen);
+       else
+               data_len = 0;
+       total_len += data_len;
+       e = kzalloc(total_len, GFP_ATOMIC);
        if (!e)
                return -ENOMEM;
 
        e->etype = event;
        memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
+       if (data)
+               memcpy(&e->edata, data, data_len);
 
-       spin_lock_irqsave(&cfg_priv->evt_q_lock, flags);
-       list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list);
-       spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags);
+       spin_lock_irqsave(&cfg->evt_q_lock, flags);
+       list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
+       spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
 
        return err;
 }
@@ -3327,12 +5035,12 @@ static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
 
 static void brcmf_cfg80211_event_handler(struct work_struct *work)
 {
-       struct brcmf_cfg80211_priv *cfg_priv =
-                       container_of(work, struct brcmf_cfg80211_priv,
+       struct brcmf_cfg80211_info *cfg =
+                       container_of(work, struct brcmf_cfg80211_info,
                                     event_work);
        struct brcmf_cfg80211_event_q *e;
 
-       e = brcmf_deq_event(cfg_priv);
+       e = brcmf_deq_event(cfg);
        if (unlikely(!e)) {
                WL_ERR("event queue empty...\n");
                return;
@@ -3340,137 +5048,131 @@ static void brcmf_cfg80211_event_handler(struct work_struct *work)
 
        do {
                WL_INFO("event type (%d)\n", e->etype);
-               if (cfg_priv->el.handler[e->etype])
-                       cfg_priv->el.handler[e->etype](cfg_priv,
-                                                      cfg_to_ndev(cfg_priv),
+               if (cfg->el.handler[e->etype])
+                       cfg->el.handler[e->etype](cfg,
+                                                      cfg_to_ndev(cfg),
                                                       &e->emsg, e->edata);
                else
                        WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
                brcmf_put_event(e);
-       } while ((e = brcmf_deq_event(cfg_priv)));
+       } while ((e = brcmf_deq_event(cfg)));
 
 }
 
-static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
 {
-       spin_lock_init(&cfg_priv->evt_q_lock);
-       INIT_LIST_HEAD(&cfg_priv->evt_q_list);
+       spin_lock_init(&cfg->evt_q_lock);
+       INIT_LIST_HEAD(&cfg->evt_q_list);
 }
 
-static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_cfg80211_event_q *e;
 
-       spin_lock_irq(&cfg_priv->evt_q_lock);
-       while (!list_empty(&cfg_priv->evt_q_list)) {
-               e = list_first_entry(&cfg_priv->evt_q_list,
+       spin_lock_irq(&cfg->evt_q_lock);
+       while (!list_empty(&cfg->evt_q_list)) {
+               e = list_first_entry(&cfg->evt_q_list,
                                     struct brcmf_cfg80211_event_q, evt_q_list);
                list_del(&e->evt_q_list);
                kfree(e);
        }
-       spin_unlock_irq(&cfg_priv->evt_q_lock);
+       spin_unlock_irq(&cfg->evt_q_lock);
 }
 
-static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
 {
        s32 err = 0;
 
-       cfg_priv->scan_request = NULL;
-       cfg_priv->pwr_save = true;
-       cfg_priv->iscan_on = true;      /* iscan on & off switch.
+       cfg->scan_request = NULL;
+       cfg->pwr_save = true;
+#ifdef CONFIG_BRCMISCAN
+       cfg->iscan_on = true;   /* iscan on & off switch.
                                 we enable iscan per default */
-       cfg_priv->roam_on = true;       /* roam on & off switch.
+       cfg->escan_on = false;  /* escan on & off switch.
+                                we disable escan per default */
+#else
+       cfg->iscan_on = false;  /* iscan on & off switch.
+                                we disable iscan per default */
+       cfg->escan_on = true;   /* escan on & off switch.
+                                we enable escan per default */
+#endif
+       cfg->roam_on = true;    /* roam on & off switch.
                                 we enable roam per default */
 
-       cfg_priv->iscan_kickstart = false;
-       cfg_priv->active_scan = true;   /* we do active scan for
+       cfg->iscan_kickstart = false;
+       cfg->active_scan = true;        /* we do active scan for
                                 specific scan per default */
-       cfg_priv->dongle_up = false;    /* dongle is not up yet */
-       brcmf_init_eq(cfg_priv);
-       err = brcmf_init_priv_mem(cfg_priv);
+       cfg->dongle_up = false; /* dongle is not up yet */
+       brcmf_init_eq(cfg);
+       err = brcmf_init_priv_mem(cfg);
        if (err)
                return err;
-       INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler);
-       brcmf_init_eloop_handler(&cfg_priv->el);
-       mutex_init(&cfg_priv->usr_sync);
-       err = brcmf_init_iscan(cfg_priv);
+       INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
+       brcmf_init_eloop_handler(&cfg->el);
+       mutex_init(&cfg->usr_sync);
+       err = brcmf_init_iscan(cfg);
        if (err)
                return err;
-       brcmf_init_conf(cfg_priv->conf);
-       brcmf_init_prof(cfg_priv->profile);
-       brcmf_link_down(cfg_priv);
+       brcmf_init_escan(cfg);
+       brcmf_init_conf(cfg->conf);
+       brcmf_init_prof(cfg->profile);
+       brcmf_link_down(cfg);
 
        return err;
 }
 
-static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv)
+static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
 {
-       cancel_work_sync(&cfg_priv->event_work);
-       cfg_priv->dongle_up = false;    /* dongle down */
-       brcmf_flush_eq(cfg_priv);
-       brcmf_link_down(cfg_priv);
-       brcmf_term_iscan(cfg_priv);
-       brcmf_deinit_priv_mem(cfg_priv);
+       cancel_work_sync(&cfg->event_work);
+       cfg->dongle_up = false; /* dongle down */
+       brcmf_flush_eq(cfg);
+       brcmf_link_down(cfg);
+       brcmf_abort_scanning(cfg);
+       brcmf_deinit_priv_mem(cfg);
 }
 
-struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
-                                                struct device *busdev,
-                                                void *data)
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
+                                                 struct device *busdev,
+                                                 struct brcmf_pub *drvr)
 {
        struct wireless_dev *wdev;
-       struct brcmf_cfg80211_priv *cfg_priv;
-       struct brcmf_cfg80211_iface *ci;
-       struct brcmf_cfg80211_dev *cfg_dev;
+       struct brcmf_cfg80211_info *cfg;
        s32 err = 0;
 
        if (!ndev) {
                WL_ERR("ndev is invalid\n");
                return NULL;
        }
-       cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
-       if (!cfg_dev)
-               return NULL;
 
-       wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev);
+       wdev = brcmf_alloc_wdev(busdev);
        if (IS_ERR(wdev)) {
-               kfree(cfg_dev);
                return NULL;
        }
 
        wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
-       cfg_priv = wdev_to_cfg(wdev);
-       cfg_priv->wdev = wdev;
-       cfg_priv->pub = data;
-       ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
-       ci->cfg_priv = cfg_priv;
+       cfg = wdev_to_cfg(wdev);
+       cfg->wdev = wdev;
+       cfg->pub = drvr;
        ndev->ieee80211_ptr = wdev;
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
-       err = wl_init_priv(cfg_priv);
+       err = wl_init_priv(cfg);
        if (err) {
                WL_ERR("Failed to init iwm_priv (%d)\n", err);
                goto cfg80211_attach_out;
        }
-       brcmf_set_drvdata(cfg_dev, ci);
 
-       return cfg_dev;
+       return cfg;
 
 cfg80211_attach_out:
-       brcmf_free_wdev(cfg_priv);
-       kfree(cfg_dev);
+       brcmf_free_wdev(cfg);
        return NULL;
 }
 
-void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev)
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_priv *cfg_priv;
-
-       cfg_priv = brcmf_priv_get(cfg_dev);
-
-       wl_deinit_priv(cfg_priv);
-       brcmf_free_wdev(cfg_priv);
-       brcmf_set_drvdata(cfg_dev, NULL);
-       kfree(cfg_dev);
+       wl_deinit_priv(cfg);
+       brcmf_free_wdev(cfg);
 }
 
 void
@@ -3478,10 +5180,10 @@ brcmf_cfg80211_event(struct net_device *ndev,
                  const struct brcmf_event_msg *e, void *data)
 {
        u32 event_type = be32_to_cpu(e->event_type);
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 
-       if (!brcmf_enq_event(cfg_priv, event_type, e))
-               schedule_work(&cfg_priv->event_work);
+       if (!brcmf_enq_event(cfg, event_type, e, data))
+               schedule_work(&cfg->event_work);
 }
 
 static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
@@ -3502,6 +5204,9 @@ static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
        case NL80211_IFTYPE_STATION:
                infra = 1;
                break;
+       case NL80211_IFTYPE_AP:
+               infra = 1;
+               break;
        default:
                err = -EINVAL;
                WL_ERR("invalid type (%d)\n", iftype);
@@ -3554,6 +5259,8 @@ static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
        setbit(eventmask, BRCMF_E_TXFAIL);
        setbit(eventmask, BRCMF_E_JOIN_START);
        setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
+       setbit(eventmask, BRCMF_E_ESCAN_RESULT);
+       setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
 
        brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
                        iovbuf, sizeof(iovbuf));
@@ -3672,46 +5379,46 @@ dongle_scantime_out:
        return err;
 }
 
-static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 {
        struct wiphy *wiphy;
        s32 phy_list;
        s8 phy;
        s32 err = 0;
 
-       err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST,
+       err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
                              &phy_list, sizeof(phy_list));
        if (err) {
                WL_ERR("error (%d)\n", err);
                return err;
        }
 
-       phy = ((char *)&phy_list)[1];
+       phy = ((char *)&phy_list)[0];
        WL_INFO("%c phy\n", phy);
        if (phy == 'n' || phy == 'a') {
-               wiphy = cfg_to_wiphy(cfg_priv);
+               wiphy = cfg_to_wiphy(cfg);
                wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
        }
 
        return err;
 }
 
-static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
 {
-       return wl_update_wiphybands(cfg_priv);
+       return wl_update_wiphybands(cfg);
 }
 
-static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
 {
        struct net_device *ndev;
        struct wireless_dev *wdev;
        s32 power_mode;
        s32 err = 0;
 
-       if (cfg_priv->dongle_up)
+       if (cfg->dongle_up)
                return err;
 
-       ndev = cfg_to_ndev(cfg_priv);
+       ndev = cfg_to_ndev(cfg);
        wdev = ndev->ieee80211_ptr;
 
        brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
@@ -3721,21 +5428,21 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
        if (err)
                goto default_conf_out;
 
-       power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF;
+       power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
        err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
        if (err)
                goto default_conf_out;
        WL_INFO("power save set to %s\n",
                (power_mode ? "enabled" : "disabled"));
 
-       err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1),
+       err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
                                WL_BEACON_TIMEOUT);
        if (err)
                goto default_conf_out;
        err = brcmf_dongle_mode(ndev, wdev->iftype);
        if (err && err != -EINPROGRESS)
                goto default_conf_out;
-       err = brcmf_dongle_probecap(cfg_priv);
+       err = brcmf_dongle_probecap(cfg);
        if (err)
                goto default_conf_out;
 
@@ -3743,31 +5450,31 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
 
 default_conf_out:
 
-       cfg_priv->dongle_up = true;
+       cfg->dongle_up = true;
 
        return err;
 
 }
 
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv)
+static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
 {
        char buf[10+IFNAMSIZ];
        struct dentry *fd;
        s32 err = 0;
 
-       sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name);
-       cfg_priv->debugfsdir = debugfs_create_dir(buf,
-                                       cfg_to_wiphy(cfg_priv)->debugfsdir);
+       sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
+       cfg->debugfsdir = debugfs_create_dir(buf,
+                                       cfg_to_wiphy(cfg)->debugfsdir);
 
-       fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir,
-               (u16 *)&cfg_priv->profile->beacon_interval);
+       fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
+               (u16 *)&cfg->profile->beacon_interval);
        if (!fd) {
                err = -ENOMEM;
                goto err_out;
        }
 
-       fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir,
-               (u8 *)&cfg_priv->profile->dtim_period);
+       fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
+               (u8 *)&cfg->profile->dtim_period);
        if (!fd) {
                err = -ENOMEM;
                goto err_out;
@@ -3777,40 +5484,40 @@ err_out:
        return err;
 }
 
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
 {
-       debugfs_remove_recursive(cfg_priv->debugfsdir);
-       cfg_priv->debugfsdir = NULL;
+       debugfs_remove_recursive(cfg->debugfsdir);
+       cfg->debugfsdir = NULL;
 }
 
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
 {
        s32 err = 0;
 
-       set_bit(WL_STATUS_READY, &cfg_priv->status);
+       set_bit(WL_STATUS_READY, &cfg->status);
 
-       brcmf_debugfs_add_netdev_params(cfg_priv);
+       brcmf_debugfs_add_netdev_params(cfg);
 
-       err = brcmf_config_dongle(cfg_priv);
+       err = brcmf_config_dongle(cfg);
        if (err)
                return err;
 
-       brcmf_invoke_iscan(cfg_priv);
+       brcmf_invoke_iscan(cfg);
 
        return err;
 }
 
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
 {
        /*
         * While going down, if associated with AP disassociate
         * from AP to save power
         */
-       if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
-            test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
-            test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
+            test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
+            test_bit(WL_STATUS_READY, &cfg->status)) {
                WL_INFO("Disassociating from AP");
-               brcmf_link_down(cfg_priv);
+               brcmf_link_down(cfg);
 
                /* Make sure WPA_Supplicant receives all the event
                   generated due to DISASSOC call to the fw to keep
@@ -3819,63 +5526,33 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
                brcmf_delay(500);
        }
 
-       set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
-       brcmf_term_iscan(cfg_priv);
-       if (cfg_priv->scan_request) {
-               cfg80211_scan_done(cfg_priv->scan_request, true);
-               /* May need to perform this to cover rmmod */
-               /* wl_set_mpc(cfg_to_ndev(wl), 1); */
-               cfg_priv->scan_request = NULL;
-       }
-       clear_bit(WL_STATUS_READY, &cfg_priv->status);
-       clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
-       clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+       brcmf_abort_scanning(cfg);
+       clear_bit(WL_STATUS_READY, &cfg->status);
 
-       brcmf_debugfs_remove_netdev(cfg_priv);
+       brcmf_debugfs_remove_netdev(cfg);
 
        return 0;
 }
 
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev)
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_priv *cfg_priv;
        s32 err = 0;
 
-       cfg_priv = brcmf_priv_get(cfg_dev);
-       mutex_lock(&cfg_priv->usr_sync);
-       err = __brcmf_cfg80211_up(cfg_priv);
-       mutex_unlock(&cfg_priv->usr_sync);
+       mutex_lock(&cfg->usr_sync);
+       err = __brcmf_cfg80211_up(cfg);
+       mutex_unlock(&cfg->usr_sync);
 
        return err;
 }
 
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev)
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_priv *cfg_priv;
        s32 err = 0;
 
-       cfg_priv = brcmf_priv_get(cfg_dev);
-       mutex_lock(&cfg_priv->usr_sync);
-       err = __brcmf_cfg80211_down(cfg_priv);
-       mutex_unlock(&cfg_priv->usr_sync);
+       mutex_lock(&cfg->usr_sync);
+       err = __brcmf_cfg80211_down(cfg);
+       mutex_unlock(&cfg->usr_sync);
 
        return err;
 }
 
-static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
-                              u8 t, u8 l, u8 *v)
-{
-       struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
-       s32 err = 0;
-
-       if (ie->offset + l + 2 > WL_TLV_INFO_MAX) {
-               WL_ERR("ei crosses buffer boundary\n");
-               return -ENOSPC;
-       }
-       ie->buf[ie->offset] = t;
-       ie->buf[ie->offset + 1] = l;
-       memcpy(&ie->buf[ie->offset + 2], v, l);
-       ie->offset += l + 2;
-
-       return err;
-}
index b5d9b36df3d0556bb54a34efbeabc5512b10057b..71ced174748a335a72f2c6b68712d9d3e40731d5 100644 (file)
 #ifndef _wl_cfg80211_h_
 #define _wl_cfg80211_h_
 
-struct brcmf_cfg80211_conf;
-struct brcmf_cfg80211_iface;
-struct brcmf_cfg80211_priv;
-struct brcmf_cfg80211_security;
-struct brcmf_cfg80211_ibss;
-
 #define WL_DBG_NONE            0
 #define WL_DBG_CONN            (1 << 5)
 #define WL_DBG_SCAN            (1 << 4)
@@ -123,13 +117,25 @@ do {                                                              \
 #define WL_SCAN_UNASSOC_TIME           40
 #define WL_SCAN_PASSIVE_TIME           120
 
+#define WL_ESCAN_BUF_SIZE              (1024 * 64)
+#define WL_ESCAN_TIMER_INTERVAL_MS     8000 /* E-Scan timeout */
+
+#define WL_ESCAN_ACTION_START          1
+#define WL_ESCAN_ACTION_CONTINUE       2
+#define WL_ESCAN_ACTION_ABORT          3
+
+#define WL_AUTH_SHARED_KEY             1       /* d11 shared authentication */
+#define IE_MAX_LEN                     512
+
 /* dongle status */
 enum wl_status {
        WL_STATUS_READY,
        WL_STATUS_SCANNING,
        WL_STATUS_SCAN_ABORTING,
        WL_STATUS_CONNECTING,
-       WL_STATUS_CONNECTED
+       WL_STATUS_CONNECTED,
+       WL_STATUS_AP_CREATING,
+       WL_STATUS_AP_CREATED
 };
 
 /* wi-fi mode */
@@ -169,23 +175,17 @@ struct brcmf_cfg80211_conf {
        struct ieee80211_channel channel;
 };
 
+/* forward declaration */
+struct brcmf_cfg80211_info;
+
 /* cfg80211 main event loop */
 struct brcmf_cfg80211_event_loop {
-       s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_priv *cfg_priv,
+       s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
                                     struct net_device *ndev,
                                     const struct brcmf_event_msg *e,
                                     void *data);
 };
 
-/* representing interface of cfg80211 plane */
-struct brcmf_cfg80211_iface {
-       struct brcmf_cfg80211_priv *cfg_priv;
-};
-
-struct brcmf_cfg80211_dev {
-       void *driver_data;      /* to store cfg80211 object information */
-};
-
 /* basic structure of scan request */
 struct brcmf_cfg80211_scan_req {
        struct brcmf_ssid_le ssid_le;
@@ -238,7 +238,7 @@ struct brcmf_cfg80211_profile {
 /* dongle iscan event loop */
 struct brcmf_cfg80211_iscan_eloop {
        s32 (*handler[WL_SCAN_ERSULTS_LAST])
-               (struct brcmf_cfg80211_priv *cfg_priv);
+               (struct brcmf_cfg80211_info *cfg);
 };
 
 /* dongle iscan controller */
@@ -275,92 +275,240 @@ struct brcmf_cfg80211_pmk_list {
        struct pmkid foo[MAXPMKID - 1];
 };
 
-/* dongle private data of cfg80211 interface */
-struct brcmf_cfg80211_priv {
-       struct wireless_dev *wdev;      /* representing wl cfg80211 device */
-       struct brcmf_cfg80211_conf *conf;       /* dongle configuration */
-       struct cfg80211_scan_request *scan_request;     /* scan request
-                                                        object */
-       struct brcmf_cfg80211_event_loop el;    /* main event loop */
-       struct list_head evt_q_list;    /* used for event queue */
-       spinlock_t       evt_q_lock;    /* for event queue synchronization */
-       struct mutex usr_sync;  /* maily for dongle up/down synchronization */
-       struct brcmf_scan_results *bss_list;    /* bss_list holding scanned
-                                                ap information */
+/* dongle escan state */
+enum wl_escan_state {
+       WL_ESCAN_STATE_IDLE,
+       WL_ESCAN_STATE_SCANNING
+};
+
+struct escan_info {
+       u32 escan_state;
+       u8 escan_buf[WL_ESCAN_BUF_SIZE];
+       struct wiphy *wiphy;
+       struct net_device *ndev;
+};
+
+/* Structure to hold WPS, WPA IEs for a AP */
+struct ap_info {
+       u8 probe_res_ie[IE_MAX_LEN];
+       u8 beacon_ie[IE_MAX_LEN];
+       u32 probe_res_ie_len;
+       u32 beacon_ie_len;
+       u8 *wpa_ie;
+       u8 *rsn_ie;
+       bool security_mode;
+};
+
+/**
+ * struct brcmf_pno_param_le - PNO scan configuration parameters
+ *
+ * @version: PNO parameters version.
+ * @scan_freq: scan frequency.
+ * @lost_network_timeout: #sec. to declare discovered network as lost.
+ * @flags: Bit field to control features of PFN such as sort criteria auto
+ *     enable switch and background scan.
+ * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
+ *     criteria.
+ * @bestn: number of best networks in each scan.
+ * @mscan: number of scans recorded.
+ * @repeat: minimum number of scan intervals before scan frequency changes
+ *     in adaptive scan.
+ * @exp: exponent of 2 for maximum scan interval.
+ * @slow_freq: slow scan period.
+ */
+struct brcmf_pno_param_le {
+       __le32 version;
+       __le32 scan_freq;
+       __le32 lost_network_timeout;
+       __le16 flags;
+       __le16 rssi_margin;
+       u8 bestn;
+       u8 mscan;
+       u8 repeat;
+       u8 exp;
+       __le32 slow_freq;
+};
+
+/**
+ * struct brcmf_pno_net_param_le - scan parameters per preferred network.
+ *
+ * @ssid: ssid name and its length.
+ * @flags: bit2: hidden.
+ * @infra: BSS vs IBSS.
+ * @auth: Open vs Closed.
+ * @wpa_auth: WPA type.
+ * @wsec: wsec value.
+ */
+struct brcmf_pno_net_param_le {
+       struct brcmf_ssid_le ssid;
+       __le32 flags;
+       __le32 infra;
+       __le32 auth;
+       __le32 wpa_auth;
+       __le32 wsec;
+};
+
+/**
+ * struct brcmf_pno_net_info_le - information per found network.
+ *
+ * @bssid: BSS network identifier.
+ * @channel: channel number only.
+ * @SSID_len: length of ssid.
+ * @SSID: ssid characters.
+ * @RSSI: receive signal strength (in dBm).
+ * @timestamp: age in seconds.
+ */
+struct brcmf_pno_net_info_le {
+       u8 bssid[ETH_ALEN];
+       u8 channel;
+       u8 SSID_len;
+       u8 SSID[32];
+       __le16  RSSI;
+       __le16  timestamp;
+};
+
+/**
+ * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
+ *
+ * @version: PNO version identifier.
+ * @status: indicates completion status of PNO scan.
+ * @count: amount of brcmf_pno_net_info_le entries appended.
+ */
+struct brcmf_pno_scanresults_le {
+       __le32 version;
+       __le32 status;
+       __le32 count;
+};
+
+/**
+ * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
+ *
+ * @wdev: representing wl cfg80211 device.
+ * @conf: dongle configuration.
+ * @scan_request: cfg80211 scan request object.
+ * @el: main event loop.
+ * @evt_q_list: used for event queue.
+ * @evt_q_lock: for event queue synchronization.
+ * @usr_sync: mainly for dongle up/down synchronization.
+ * @bss_list: bss_list holding scanned ap information.
+ * @scan_results: results of the last scan.
+ * @scan_req_int: internal scan request object.
+ * @bss_info: bss information for cfg80211 layer.
+ * @ie: information element object for internal purpose.
+ * @profile: holding dongle profile.
+ * @iscan: iscan controller information.
+ * @conn_info: association info.
+ * @pmk_list: wpa2 pmk list.
+ * @event_work: event handler work struct.
+ * @status: current dongle status.
+ * @pub: common driver information.
+ * @channel: current channel.
+ * @iscan_on: iscan on/off switch.
+ * @iscan_kickstart: indicate iscan already started.
+ * @active_scan: current scan mode.
+ * @sched_escan: e-scan for scheduled scan support running.
+ * @ibss_starter: indicates this sta is ibss starter.
+ * @link_up: link/connection up flag.
+ * @pwr_save: indicate whether dongle to support power save mode.
+ * @dongle_up: indicate whether dongle up or not.
+ * @roam_on: on/off switch for dongle self-roaming.
+ * @scan_tried: indicates if first scan attempted.
+ * @dcmd_buf: dcmd buffer.
+ * @extra_buf: mainly to grab assoc information.
+ * @debugfsdir: debugfs folder for this device.
+ * @escan_on: escan on/off switch.
+ * @escan_info: escan information.
+ * @escan_timeout: Timer for catch scan timeout.
+ * @escan_timeout_work: scan timeout worker.
+ * @escan_ioctl_buf: dongle command buffer for escan commands.
+ * @ap_info: host ap information.
+ * @ci: used to link this structure to netdev private data.
+ */
+struct brcmf_cfg80211_info {
+       struct wireless_dev *wdev;
+       struct brcmf_cfg80211_conf *conf;
+       struct cfg80211_scan_request *scan_request;
+       struct brcmf_cfg80211_event_loop el;
+       struct list_head evt_q_list;
+       spinlock_t       evt_q_lock;
+       struct mutex usr_sync;
+       struct brcmf_scan_results *bss_list;
        struct brcmf_scan_results *scan_results;
-       struct brcmf_cfg80211_scan_req *scan_req_int;   /* scan request object
-                                                for internal purpose */
-       struct wl_cfg80211_bss_info *bss_info;  /* bss information for
-                                                cfg80211 layer */
-       struct brcmf_cfg80211_ie ie;    /* information element object for
-                                        internal purpose */
-       struct brcmf_cfg80211_profile *profile; /* holding dongle profile */
-       struct brcmf_cfg80211_iscan_ctrl *iscan;        /* iscan controller */
-       struct brcmf_cfg80211_connect_info conn_info; /* association info */
-       struct brcmf_cfg80211_pmk_list *pmk_list;       /* wpa2 pmk list */
-       struct work_struct event_work;  /* event handler work struct */
-       unsigned long status;           /* current dongle status */
-       void *pub;
-       u32 channel;            /* current channel */
-       bool iscan_on;          /* iscan on/off switch */
-       bool iscan_kickstart;   /* indicate iscan already started */
-       bool active_scan;       /* current scan mode */
-       bool ibss_starter;      /* indicates this sta is ibss starter */
-       bool link_up;           /* link/connection up flag */
-       bool pwr_save;          /* indicate whether dongle to support
-                                        power save mode */
-       bool dongle_up;         /* indicate whether dongle up or not */
-       bool roam_on;           /* on/off switch for dongle self-roaming */
-       bool scan_tried;        /* indicates if first scan attempted */
-       u8 *dcmd_buf;           /* dcmd buffer */
-       u8 *extra_buf;          /* maily to grab assoc information */
+       struct brcmf_cfg80211_scan_req *scan_req_int;
+       struct wl_cfg80211_bss_info *bss_info;
+       struct brcmf_cfg80211_ie ie;
+       struct brcmf_cfg80211_profile *profile;
+       struct brcmf_cfg80211_iscan_ctrl *iscan;
+       struct brcmf_cfg80211_connect_info conn_info;
+       struct brcmf_cfg80211_pmk_list *pmk_list;
+       struct work_struct event_work;
+       unsigned long status;
+       struct brcmf_pub *pub;
+       u32 channel;
+       bool iscan_on;
+       bool iscan_kickstart;
+       bool active_scan;
+       bool sched_escan;
+       bool ibss_starter;
+       bool link_up;
+       bool pwr_save;
+       bool dongle_up;
+       bool roam_on;
+       bool scan_tried;
+       u8 *dcmd_buf;
+       u8 *extra_buf;
        struct dentry *debugfsdir;
-       u8 ci[0] __aligned(NETDEV_ALIGN);
+       bool escan_on;
+       struct escan_info escan_info;
+       struct timer_list escan_timeout;
+       struct work_struct escan_timeout_work;
+       u8 *escan_ioctl_buf;
+       struct ap_info *ap_info;
 };
 
-static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_priv *w)
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
 {
        return w->wdev->wiphy;
 }
 
-static inline struct brcmf_cfg80211_priv *wiphy_to_cfg(struct wiphy *w)
+static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
 {
-       return (struct brcmf_cfg80211_priv *)(wiphy_priv(w));
+       return (struct brcmf_cfg80211_info *)(wiphy_priv(w));
 }
 
-static inline struct brcmf_cfg80211_priv *wdev_to_cfg(struct wireless_dev *wd)
+static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
 {
-       return (struct brcmf_cfg80211_priv *)(wdev_priv(wd));
+       return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
 }
 
-static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_priv *cfg)
+static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
 {
        return cfg->wdev->netdev;
 }
 
-static inline struct brcmf_cfg80211_priv *ndev_to_cfg(struct net_device *ndev)
+static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
 {
        return wdev_to_cfg(ndev->ieee80211_ptr);
 }
 
-#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data))
+#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
 #define cfg_to_iscan(w) (w->iscan)
 
 static inline struct
-brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
+brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
 {
        return &cfg->conn_info;
 }
 
-extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
-                                                       struct device *busdev,
-                                                       void *data);
-extern void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg);
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
+                                                 struct device *busdev,
+                                                 struct brcmf_pub *drvr);
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
 
 /* event handler from dongle */
-extern void brcmf_cfg80211_event(struct net_device *ndev,
-                                const struct brcmf_event_msg *e, void *data);
-extern s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev);
-extern s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev);
+void brcmf_cfg80211_event(struct net_device *ndev,
+                         const struct brcmf_event_msg *e, void *data);
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
 
 #endif                         /* _wl_cfg80211_h_ */
index 8c9345dd37d270fcf8abf03d08714ff176df5529..b89f1272b93f506f24f8cf9e262f9b7c489c6180 100644 (file)
@@ -535,9 +535,6 @@ void ai_detach(struct si_pub *sih)
 {
        struct si_info *sii;
 
-       struct si_pub *si_local = NULL;
-       memcpy(&si_local, &sih, sizeof(struct si_pub **));
-
        sii = container_of(sih, struct si_info, pub);
 
        if (sii == NULL)
index a5edebeb0b4f7f748155551df76396602885078e..a744ea5a95599797023a0040401ab1816855f77f 100644 (file)
@@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
-
+/* This needs to be adjusted when brcms_firmwares changes */
+MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
+MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
 
 /* recognized BCMA Core IDs */
 static struct bcma_device_id brcms_coreid_table[] = {
@@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
        }
 }
 
-static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void brcms_ops_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct brcms_info *wl = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto done;
        }
        brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
-       tx_info->rate_driver_data[0] = tx_info->control.sta;
+       tx_info->rate_driver_data[0] = control->sta;
  done:
        spin_unlock_bh(&wl->lock);
 }
@@ -300,7 +304,10 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        wl->mute_tx = true;
 
        if (!wl->pub->up)
-               err = brcms_up(wl);
+               if (!blocked)
+                       err = brcms_up(wl);
+               else
+                       err = -ERFKILL;
        else
                err = -ENODEV;
        spin_unlock_bh(&wl->lock);
index 03ca65324845f39f1e8e48b2669a243bddf7ce5c..75086b37c817b747fe5482bb097a66a859c14214 100644 (file)
@@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
 
        channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
-       if (channel > 14) {
-               rx_status->band = IEEE80211_BAND_5GHZ;
-               rx_status->freq = ieee80211_ofdm_chan_to_freq(
-                                       WF_CHAN_FACTOR_5_G/2, channel);
-
-       } else {
-               rx_status->band = IEEE80211_BAND_2GHZ;
-               rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
-       }
+       rx_status->band =
+               channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+       rx_status->freq =
+               ieee80211_channel_to_frequency(channel, rx_status->band);
 
        rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
 
index bcc79b4e3267ba1434c3c0bfdec1b2de6f50d18b..e8682855b73a5189936576bc69367fb765d1bf7f 100644 (file)
@@ -34,6 +34,7 @@
 #define BCM43235_CHIP_ID       43235
 #define BCM43236_CHIP_ID       43236
 #define BCM43238_CHIP_ID       43238
+#define BCM43241_CHIP_ID       0x4324
 #define BCM4329_CHIP_ID                0x4329
 #define BCM4330_CHIP_ID                0x4330
 #define BCM4331_CHIP_ID                0x4331
index f10d30274c23ade434cc8149ebcaebaadedf8584..c11a290a1edf6c07e38cfd1035f783ddb9918ac2 100644 (file)
 #define WL_CHANSPEC_BAND_2G            0x2000
 #define INVCHANSPEC                    255
 
-/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
-#define WF_CHAN_FACTOR_2_4_G           4814    /* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G             10000   /* 5   GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G             8000    /* 4.9 GHz band for Japan */
-
 #define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
 #define CHSPEC_BAND(chspec)    ((chspec) & WL_CHANSPEC_BAND_MASK)
 
index 47932b28aac101217ee56d1db2dc736e7d8d1d5b..970a48baaf804a38ff1883702bbd9460d7d5b8c8 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/etherdevice.h>
 #include "hostap_wlan.h"
 #include "hostap.h"
 #include "hostap_ap.h"
@@ -463,8 +464,7 @@ static void handle_info_queue_scanresults(local_info_t *local)
                prism2_host_roaming(local);
 
        if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
-           memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00",
-                  ETH_ALEN) != 0) {
+           !is_zero_ether_addr(local->preferred_ap)) {
                /*
                 * Firmware seems to be getting into odd state in host_roaming
                 * mode 2 when hostscan is used without join command, so try
index 18054d9c66887363cc7d028279c04e89732481ba..ac074731335a5ed1b7ef393dfd15ae6c87299d03 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
 #include <linux/module.h>
+#include <linux/etherdevice.h>
 #include <net/lib80211.h>
 
 #include "hostap_wlan.h"
@@ -3221,8 +3222,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
                return -EINVAL;
 
        addr = ext->addr.sa_data;
-       if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
-           addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(addr)) {
                sta_ptr = NULL;
                crypt = &local->crypt_info.crypt[i];
        } else {
@@ -3394,8 +3394,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
                i--;
 
        addr = ext->addr.sa_data;
-       if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
-           addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(addr)) {
                sta_ptr = NULL;
                crypt = &local->crypt_info.crypt[i];
        } else {
@@ -3458,9 +3457,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
            param->u.crypt.key_len)
                return -EINVAL;
 
-       if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-           param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-           param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(param->sta_addr)) {
                if (param->u.crypt.idx >= WEP_KEYS)
                        return -EINVAL;
                sta_ptr = NULL;
@@ -3593,9 +3590,7 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
        if (max_key_len < 0)
                return -EINVAL;
 
-       if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-           param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-           param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(param->sta_addr)) {
                sta_ptr = NULL;
                if (param->u.crypt.idx >= WEP_KEYS)
                        param->u.crypt.idx = local->crypt_info.tx_keyidx;
index 627bc12074c729a37426199e52cb119dce01fdda..15f0fad39add227550e3d0a49fedd24737da6aba 100644 (file)
@@ -1084,7 +1084,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
        __le16 val = cpu_to_le16(reason);
 
        if (local->iw_mode != IW_MODE_INFRA ||
-           memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 ||
+           is_zero_ether_addr(local->bssid) ||
            memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
                return 0;
 
index 534e6557e7e6c1f1eb8f2ca1732c0ad9080bd084..29b8fa1adefde125d62608f3b9513e53afba247a 100644 (file)
@@ -6962,13 +6962,6 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
        struct ipw2100_priv *priv = libipw_priv(dev);
        int err = 0;
 
-       static const unsigned char any[] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-       static const unsigned char off[] = {
-               0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-       };
-
        // sanity checks
        if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
                return -EINVAL;
@@ -6979,8 +6972,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
                goto done;
        }
 
-       if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
-           !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+       if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+           is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
                /* we disable mandatory BSSID association */
                IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
                priv->config &= ~CFG_STATIC_BSSID;
index 0df45914739489f5f7555423f9f647d8fdb7dff5..935120fc8c9397daef4cb822b3d00176e0ca0cb8 100644 (file)
@@ -9037,18 +9037,11 @@ static int ipw_wx_set_wap(struct net_device *dev,
 {
        struct ipw_priv *priv = libipw_priv(dev);
 
-       static const unsigned char any[] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-       static const unsigned char off[] = {
-               0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-       };
-
        if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
                return -EINVAL;
        mutex_lock(&priv->mutex);
-       if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
-           !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+       if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+           is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
                /* we disable mandatory BSSID association */
                IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
                priv->config &= ~CFG_STATIC_BSSID;
index 1571505b1a38ba4903f6664cb0a25ada4e9a6e04..54aba474443867f463714e117f14a605ba251803 100644 (file)
@@ -675,7 +675,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
        }
       done:
        if (ieee->set_security)
-               ieee->set_security(ieee->dev, &sec);
+               ieee->set_security(dev, &sec);
 
        return ret;
 }
index faec404672081d5f72d03da9f44abac0de3c27d1..e252acb9c86239aa0b031fad77d465beb4efb86d 100644 (file)
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
  * start C_TX command process
  */
 static int
-il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il3945_tx_skb(struct il_priv *il,
+             struct ieee80211_sta *sta,
+             struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
        hdr_len = ieee80211_hdrlen(fc);
 
        /* Find idx into station table for destination station */
-       sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+       sta_id = il_sta_id_or_broadcast(il, sta);
        if (sta_id == IL_INVALID_STATION) {
                D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
                goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
 }
 
 static void
-il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il3945_mac_tx(struct ieee80211_hw *hw,
+              struct ieee80211_tx_control *control,
+              struct sk_buff *skb)
 {
        struct il_priv *il = hw->priv;
 
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
             ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (il3945_tx_skb(il, skb))
+       if (il3945_tx_skb(il, control->sta, skb))
                dev_kfree_skb_any(skb);
 
        D_MAC80211("leave\n");
index 34f61a0581a22cf78054063ced014aa05cd72f38..eac4dc8bc879ffabeacf558b576f6a6fe8b5d9ed 100644 (file)
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
 }
 
 static void
-il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
-                        struct ieee80211_tx_info *info, __le16 fc)
+il4965_tx_cmd_build_rate(struct il_priv *il,
+                        struct il_tx_cmd *tx_cmd,
+                        struct ieee80211_tx_info *info,
+                        struct ieee80211_sta *sta,
+                        __le16 fc)
 {
        const u8 rts_retry_limit = 60;
        u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
        rate_idx = info->control.rates[0].idx;
        if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
            || rate_idx > RATE_COUNT_LEGACY)
-               rate_idx =
-                   rate_lowest_index(&il->bands[info->band],
-                                     info->control.sta);
+               rate_idx = rate_lowest_index(&il->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
  * start C_TX command process
  */
 int
-il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il4965_tx_skb(struct il_priv *il,
+             struct ieee80211_sta *sta,
+             struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct il_station_priv *sta_priv = NULL;
        struct il_tx_queue *txq;
        struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
                sta_id = il->hw_params.bcast_id;
        else {
                /* Find idx into station table for destination station */
-               sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+               sta_id = il_sta_id_or_broadcast(il, sta);
 
                if (sta_id == IL_INVALID_STATION) {
                        D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
        /* TODO need this for burst mode later on */
        il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
 
-       il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+       il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
 
        il_update_stats(il, true, fc, len);
        /*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
 }
 
 void
-il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il4965_mac_tx(struct ieee80211_hw *hw,
+             struct ieee80211_tx_control *control,
+             struct sk_buff *skb)
 {
        struct il_priv *il = hw->priv;
 
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
             ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (il4965_tx_skb(il, skb))
+       if (il4965_tx_skb(il, control->sta, skb))
                dev_kfree_skb_any(skb);
 
        D_MACDUMP("leave\n");
index 1db677689cfe36b18940c406e302519c51591bef..2d092f328547d8ca37dfef233a4619c23f7b5aa4 100644 (file)
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
 void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
                                 struct ieee80211_tx_info *info);
-int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_skb(struct il_priv *il,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb);
 int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 * ssn);
 int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
 int il4965_eeprom_check_version(struct il_priv *il);
 
 /* mac80211 handlers (for 4965) */
-void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void il4965_mac_tx(struct ieee80211_hw *hw,
+                  struct ieee80211_tx_control *control,
+                  struct sk_buff *skb);
 int il4965_mac_start(struct ieee80211_hw *hw);
 void il4965_mac_stop(struct ieee80211_hw *hw);
 void il4965_configure_filter(struct ieee80211_hw *hw,
index 0370403fd0bd5d2345704f8fc766c5ad2260a7dc..318ed3c9fe7499899d08fc3de1de917438690109 100644 (file)
@@ -1586,9 +1586,9 @@ il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
                return 0;
 
        frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->da);
        memcpy(frame->sa, ta, ETH_ALEN);
-       memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->bssid);
        frame->seq_ctrl = 0;
 
        len += 24;
@@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
 
 #ifdef CONFIG_PM
 
-int
+static int
 il_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(il_pci_suspend);
 
-int
+static int
 il_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(il_pci_resume);
 
-const struct dev_pm_ops il_pm_ops = {
-       .suspend = il_pci_suspend,
-       .resume = il_pci_resume,
-       .freeze = il_pci_suspend,
-       .thaw = il_pci_resume,
-       .poweroff = il_pci_suspend,
-       .restore = il_pci_resume,
-};
+SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
 EXPORT_SYMBOL(il_pm_ops);
 
 #endif /* CONFIG_PM */
index 72468266906019972fbca813ff5b0e019fc47e29..b4bb813362bdbeb44afab3dd7b73d74dd556feb3 100644 (file)
@@ -1843,8 +1843,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
                          u32 beacon_interval);
 
 #ifdef CONFIG_PM
-int il_pci_suspend(struct device *device);
-int il_pci_resume(struct device *device);
 extern const struct dev_pm_ops il_pm_ops;
 
 #define IL_LEGACY_PM_OPS       (&il_pm_ops)
index 9bb16bdf6d26118ccc4bb6708934400c59b21c44..75e12f29d9eb7dc0e4a16bb92aa5cd6aa587b062 100644 (file)
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
 
 /* tx */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_skb(struct iwl_priv *priv,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb);
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
 #else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+                                    struct dentry *dbgfs_dir)
 {
        return 0;
 }
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 #ifdef CONFIG_IWLWIFI_DEBUG
index 4a361c55c543f5fdf491c265509f89f50903d815..01128c96b5d8c309e800bd08b01f378ceb382613 100644 (file)
@@ -1055,8 +1055,9 @@ struct iwl_wep_cmd {
 #define RX_RES_PHY_FLAGS_MOD_CCK_MSK           cpu_to_le16(1 << 1)
 #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK    cpu_to_le16(1 << 2)
 #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK       cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0x70
 #define RX_RES_PHY_FLAGS_ANTENNA_POS           4
+#define RX_RES_PHY_FLAGS_AGG_MSK               cpu_to_le16(1 << 7)
 
 #define RX_RES_STATUS_SEC_TYPE_MSK     (0x7 << 8)
 #define RX_RES_STATUS_SEC_TYPE_NONE    (0x0 << 8)
index a47b306b522cd3a49fcbb1622083baacb245d621..1a98fa3ab06df6fbf9ae8ec3197680adef182883 100644 (file)
@@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
  * Create the debugfs files and directories
  *
  */
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
 {
-       struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+       struct dentry *dir_data, *dir_rf, *dir_debug;
 
-       dir_drv = debugfs_create_dir(name, phyd);
-       if (!dir_drv)
-               return -ENOMEM;
-
-       priv->debugfs_dir = dir_drv;
+       priv->debugfs_dir = dbgfs_dir;
 
-       dir_data = debugfs_create_dir("data", dir_drv);
+       dir_data = debugfs_create_dir("data", dbgfs_dir);
        if (!dir_data)
                goto err;
-       dir_rf = debugfs_create_dir("rf", dir_drv);
+       dir_rf = debugfs_create_dir("rf", dbgfs_dir);
        if (!dir_rf)
                goto err;
-       dir_debug = debugfs_create_dir("debug", dir_drv);
+       dir_debug = debugfs_create_dir("debug", dbgfs_dir);
        if (!dir_debug)
                goto err;
 
@@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        /* Calibrations disabled/enabled status*/
        DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
 
-       if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
-               goto err;
+       /*
+        * Create a symlink with mac80211. This is not very robust, as it does
+        * not remove the symlink created. The implicit assumption is that
+        * when the opmode exits, mac80211 will also exit, and will remove
+        * this symlink as part of its cleanup.
+        */
+       if (priv->mac80211_registered) {
+               char buf[100];
+               struct dentry *mac80211_dir, *dev_dir, *root_dir;
+
+               dev_dir = dbgfs_dir->d_parent;
+               root_dir = dev_dir->d_parent;
+               mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+               snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
+                        dev_dir->d_name.name);
+
+               if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+                       goto err;
+       }
+
        return 0;
 
 err:
-       IWL_ERR(priv, "Can't create the debugfs directory\n");
-       iwl_dbgfs_unregister(priv);
+       IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
        return -ENOMEM;
 }
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-       if (!priv->debugfs_dir)
-               return;
-
-       debugfs_remove_recursive(priv->debugfs_dir);
-       priv->debugfs_dir = NULL;
-}
index 054f728f6266fb6f4f4425e7824f62a0e910730c..8141f91c3725bd3c8f26d9830e61574dd3b4586f 100644 (file)
@@ -771,6 +771,7 @@ struct iwl_priv {
        u8 agg_tids_count;
 
        struct iwl_rx_phy_res last_phy_res;
+       u32 ampdu_ref;
        bool last_phy_res_valid;
 
        /*
index a5f7bce96325819f6c14471bec1b4232af533a3e..ff8162d4c4543d3d2976c964ed2f790a8f7cb6a1 100644 (file)
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                        ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
        }
 
-       hw->wiphy->max_remain_on_channel_duration = 1000;
+       hw->wiphy->max_remain_on_channel_duration = 500;
 
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 }
 #endif
 
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+                         struct ieee80211_tx_control *control,
+                         struct sk_buff *skb)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
        IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
                     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (iwlagn_tx_skb(priv, skb))
+       if (iwlagn_tx_skb(priv, control->sta, skb))
                dev_kfree_skb_any(skb);
 }
 
index 84d3db5aa506c113c572f46cae1d58b4525c3b0a..7ff3f14306784169f886e5c7ca570d8b217a7309 100644 (file)
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
         * No race since we hold the mutex here and a new one
         * can't come in at this time.
         */
-       ieee80211_remain_on_channel_expired(priv->hw);
+       if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
+               ieee80211_remain_on_channel_expired(priv->hw);
 
        exit_pending =
                test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
                iwlagn_prepare_restart(priv);
                mutex_unlock(&priv->mutex);
                iwl_cancel_deferred_work(priv);
-               ieee80211_restart_hw(priv->hw);
+               if (priv->mac80211_registered)
+                       ieee80211_restart_hw(priv->hw);
+               else
+                       IWL_ERR(priv,
+                               "Cannot request restart before registrating with mac80211");
        } else {
                WARN_ON(1);
        }
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
                                                 const struct iwl_cfg *cfg,
-                                                const struct iwl_fw *fw)
+                                                const struct iwl_fw *fw,
+                                                struct dentry *dbgfs_dir)
 {
        struct iwl_priv *priv;
        struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
                goto out_destroy_workqueue;
 
-       if (iwl_dbgfs_register(priv, DRV_NAME))
-               IWL_ERR(priv,
-                       "failed to create debugfs files. Ignoring error\n");
+       if (iwl_dbgfs_register(priv, dbgfs_dir))
+               goto out_mac80211_unregister;
 
        return op_mode;
 
+out_mac80211_unregister:
+       iwlagn_mac_unregister(priv);
 out_destroy_workqueue:
+       iwl_tt_exit(priv);
+       iwl_testmode_free(priv);
+       iwl_cancel_deferred_work(priv);
        destroy_workqueue(priv->workqueue);
        priv->workqueue = NULL;
        iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 
        IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
 
-       iwl_dbgfs_unregister(priv);
-
        iwl_testmode_free(priv);
        iwlagn_mac_unregister(priv);
 
index fee5cffa166998c30437ac37b7490e0b85972dc1..5a9c325804f6dcdc8d47d44faf15dec1051d4471 100644 (file)
@@ -667,6 +667,7 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
        priv->last_phy_res_valid = true;
+       priv->ampdu_ref++;
        memcpy(&priv->last_phy_res, pkt->data,
               sizeof(struct iwl_rx_phy_res));
        return 0;
@@ -981,6 +982,16 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
        if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
                rx_status.flag |= RX_FLAG_SHORTPRE;
 
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+               /*
+                * We know which subframes of an A-MPDU belong
+                * together since we get a single PHY response
+                * from the firmware for all of them
+                */
+               rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status.ampdu_reference = priv->ampdu_ref;
+       }
+
        /* Set up the HT phy flags */
        if (rate_n_flags & RATE_MCS_HT_MSK)
                rx_status.flag |= RX_FLAG_HT;
index e3467fa868996264f88e681edd8b5f6114fa5fe4..bb9f6252d28fad2d25340845f8f3a324e5d0fc56 100644 (file)
@@ -612,9 +612,9 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
                return 0;
 
        frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->da);
        memcpy(frame->sa, ta, ETH_ALEN);
-       memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->bssid);
        frame->seq_ctrl = 0;
 
        len += 24;
index b29b798f7550ad41b55efa7db5ab94e9d146a355..cd9b6de4273e8c035a8bcdd39bf715316825db90 100644 (file)
@@ -128,10 +128,11 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                               struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_addsta_cmd *addsta =
-               (struct iwl_addsta_cmd *) cmd->payload;
 
-       return iwl_process_add_sta_resp(priv, addsta, pkt);
+       if (!cmd)
+               return 0;
+
+       return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -150,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
        if (!(flags & CMD_ASYNC)) {
-               cmd.flags |= CMD_WANT_SKB;
+               cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
                might_sleep();
        }
 
index 5971a23aa47d1218317460404ef5401f38d43547..f5ca73a89870727a71d6a354abebaeb01221d6f5 100644 (file)
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                                     struct iwl_tx_cmd *tx_cmd,
                                     struct ieee80211_tx_info *info,
+                                    struct ieee80211_sta *sta,
                                     __le16 fc)
 {
        u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
                        (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
                rate_idx = rate_lowest_index(
-                               &priv->eeprom_data->bands[info->band],
-                               info->control.sta);
+                               &priv->eeprom_data->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
 /*
  * start REPLY_TX command process
  */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+int iwlagn_tx_skb(struct iwl_priv *priv,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                sta_id = ctx->bcast_sta_id;
        else {
                /* Find index into station table for destination station */
-               sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
+               sta_id = iwl_sta_id_or_broadcast(ctx, sta);
                if (sta_id == IWL_INVALID_STATION) {
                        IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                                       hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-       if (info->control.sta)
-               sta_priv = (void *)info->control.sta->drv_priv;
+       if (sta)
+               sta_priv = (void *)sta->drv_priv;
 
        if (sta_priv && sta_priv->asleep &&
            (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        /* TODO need this for burst mode later on */
        iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 
-       iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+       iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
 
        memset(&info->status, 0, sizeof(info->status));
 
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                 * only. Check this here.
                 */
                if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
-                   tid_data->agg.state != IWL_AGG_OFF,
+                             tid_data->agg.state != IWL_AGG_OFF,
                    "Tx while agg.state = %d", tid_data->agg.state))
                        goto drop_unlock_sta;
 
index 6d8d6dd7943fc3cd2e58e872071078be8f4bfc0b..2cb1efbc5ed1f3d80127f678e4c82e42046eb7c1 100644 (file)
@@ -295,7 +295,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
 static int iwl_verify_sec_sparse(struct iwl_priv *priv,
                                  const struct fw_desc *fw_desc)
 {
-       __le32 *image = (__le32 *)fw_desc->v_addr;
+       __le32 *image = (__le32 *)fw_desc->data;
        u32 len = fw_desc->len;
        u32 val;
        u32 i;
@@ -319,7 +319,7 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
 static void iwl_print_mismatch_sec(struct iwl_priv *priv,
                                    const struct fw_desc *fw_desc)
 {
-       __le32 *image = (__le32 *)fw_desc->v_addr;
+       __le32 *image = (__le32 *)fw_desc->data;
        u32 len = fw_desc->len;
        u32 val;
        u32 offs;
index 06ca505bb2cc68cb84168f55107e52324639ff4a..59a5f78402fce35319014267c33514b7e4b7ef65 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/tracepoint.h>
 #include <linux/device.h>
+#include "iwl-trans.h"
 
 
 #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -237,27 +238,34 @@ TRACE_EVENT(iwlwifi_dbg,
 #define TRACE_SYSTEM iwlwifi
 
 TRACE_EVENT(iwlwifi_dev_hcmd,
-       TP_PROTO(const struct device *dev, u32 flags,
-                const void *hcmd0, size_t len0,
-                const void *hcmd1, size_t len1,
-                const void *hcmd2, size_t len2),
-       TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
+       TP_PROTO(const struct device *dev,
+                struct iwl_host_cmd *cmd, u16 total_size,
+                const void *hdr, size_t hdr_len),
+       TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
        TP_STRUCT__entry(
                DEV_ENTRY
-               __dynamic_array(u8, hcmd0, len0)
-               __dynamic_array(u8, hcmd1, len1)
-               __dynamic_array(u8, hcmd2, len2)
+               __dynamic_array(u8, hcmd, total_size)
                __field(u32, flags)
        ),
        TP_fast_assign(
+               int i, offset = hdr_len;
+
                DEV_ASSIGN;
-               memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
-               memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
-               memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
-               __entry->flags = flags;
+               __entry->flags = cmd->flags;
+               memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
+
+               for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+                       if (!cmd->len[i])
+                               continue;
+                       if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+                               continue;
+                       memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
+                              cmd->data[i], cmd->len[i]);
+                       offset += cmd->len[i];
+               }
        ),
        TP_printk("[%s] hcmd %#.2x (%ssync)",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0],
+                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
                  __entry->flags & CMD_ASYNC ? "a" : "")
 );
 
index cc41cfaedfbde4afaef1e54f23f215e658490dca..198634b75ed0e0cb5cb732525b6c9049f1a4b422 100644 (file)
@@ -64,6 +64,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 
 #include "iwl-drv.h"
 #include "iwl-debug.h"
@@ -101,6 +102,10 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
 /**
  * struct iwl_drv - drv common data
  * @list: list of drv structures using this opmode
@@ -126,6 +131,12 @@ struct iwl_drv {
        char firmware_name[25];         /* name of firmware file to load */
 
        struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct dentry *dbgfs_drv;
+       struct dentry *dbgfs_trans;
+       struct dentry *dbgfs_op_mode;
+#endif
 };
 
 #define DVM_OP_MODE    0
@@ -154,10 +165,8 @@ struct fw_sec {
 
 static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
 {
-       if (desc->v_addr)
-               dma_free_coherent(drv->trans->dev, desc->len,
-                                 desc->v_addr, desc->p_addr);
-       desc->v_addr = NULL;
+       vfree(desc->data);
+       desc->data = NULL;
        desc->len = 0;
 }
 
@@ -176,25 +185,29 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
 }
 
 static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
-                     struct fw_sec *sec)
+                            struct fw_sec *sec)
 {
-       if (!sec || !sec->size) {
-               desc->v_addr = NULL;
+       void *data;
+
+       desc->data = NULL;
+
+       if (!sec || !sec->size)
                return -EINVAL;
-       }
 
-       desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
-                                         &desc->p_addr, GFP_KERNEL);
-       if (!desc->v_addr)
+       data = vmalloc(sec->size);
+       if (!data)
                return -ENOMEM;
 
        desc->len = sec->size;
        desc->offset = sec->offset;
-       memcpy(desc->v_addr, sec->data, sec->size);
+       memcpy(data, sec->data, desc->len);
+       desc->data = data;
+
        return 0;
 }
 
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+                               void *context);
 
 #define UCODE_EXPERIMENTAL_INDEX       100
 #define UCODE_EXPERIMENTAL_TAG         "exp"
@@ -231,7 +244,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 
        return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
                                       drv->trans->dev,
-                                      GFP_KERNEL, drv, iwl_ucode_callback);
+                                      GFP_KERNEL, drv, iwl_req_fw_callback);
 }
 
 struct fw_img_parsing {
@@ -759,13 +772,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
        return 0;
 }
 
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+       const struct iwl_op_mode_ops *ops = op->ops;
+       struct dentry *dbgfs_dir = NULL;
+       struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                                               drv->dbgfs_drv);
+       if (!drv->dbgfs_op_mode) {
+               IWL_ERR(drv,
+                       "failed to create opmode debugfs directory\n");
+               return op_mode;
+       }
+       dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+       op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (!op_mode) {
+               debugfs_remove_recursive(drv->dbgfs_op_mode);
+               drv->dbgfs_op_mode = NULL;
+       }
+#endif
+
+       return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+       /* op_mode can be NULL if its start failed */
+       if (drv->op_mode) {
+               iwl_op_mode_stop(drv->op_mode);
+               drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               debugfs_remove_recursive(drv->dbgfs_op_mode);
+               drv->dbgfs_op_mode = NULL;
+#endif
+       }
+}
+
 /**
- * iwl_ucode_callback - callback when firmware was loaded
+ * iwl_req_fw_callback - callback when firmware was loaded
  *
  * If loaded successfully, copies the firmware into buffers
  * for the card to fetch (via DMA).
  */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 {
        struct iwl_drv *drv = context;
        struct iwl_fw *fw = &drv->fw;
@@ -908,8 +965,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
        list_add_tail(&drv->list, &op->drv);
 
        if (op->ops) {
-               const struct iwl_op_mode_ops *ops = op->ops;
-               drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+               drv->op_mode = _iwl_op_mode_start(drv, op);
 
                if (!drv->op_mode) {
                        mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,14 +1025,43 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
        init_completion(&drv->request_firmware_complete);
        INIT_LIST_HEAD(&drv->list);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* Create the device debugfs entries. */
+       drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+                                           iwl_dbgfs_root);
+
+       if (!drv->dbgfs_drv) {
+               IWL_ERR(drv, "failed to create debugfs directory\n");
+               goto err_free_drv;
+       }
+
+       /* Create transport layer debugfs dir */
+       drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+       if (!drv->trans->dbgfs_dir) {
+               IWL_ERR(drv, "failed to create transport debugfs directory\n");
+               goto err_free_dbgfs;
+       }
+#endif
+
        ret = iwl_request_firmware(drv, true);
 
        if (ret) {
                IWL_ERR(trans, "Couldn't request the fw\n");
-               kfree(drv);
-               drv = NULL;
+               goto err_fw;
        }
 
+       return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+       debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+       kfree(drv);
+       drv = NULL;
+
        return drv;
 }
 
@@ -984,9 +1069,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
 {
        wait_for_completion(&drv->request_firmware_complete);
 
-       /* op_mode can be NULL if its start failed */
-       if (drv->op_mode)
-               iwl_op_mode_stop(drv->op_mode);
+       _iwl_op_mode_stop(drv);
 
        iwl_dealloc_ucode(drv);
 
@@ -1000,6 +1083,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
                list_del(&drv->list);
        mutex_unlock(&iwlwifi_opmode_table_mtx);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
        kfree(drv);
 }
 
@@ -1022,15 +1109,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
 {
        int i;
        struct iwl_drv *drv;
+       struct iwlwifi_opmode_table *op;
 
        mutex_lock(&iwlwifi_opmode_table_mtx);
        for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
-               if (strcmp(iwlwifi_opmode_table[i].name, name))
+               op = &iwlwifi_opmode_table[i];
+               if (strcmp(op->name, name))
                        continue;
-               iwlwifi_opmode_table[i].ops = ops;
-               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
-                       drv->op_mode = ops->start(drv->trans, drv->cfg,
-                                                 &drv->fw);
+               op->ops = ops;
+               /* TODO: need to handle exceptional case */
+               list_for_each_entry(drv, &op->drv, list)
+                       drv->op_mode = _iwl_op_mode_start(drv, op);
+
                mutex_unlock(&iwlwifi_opmode_table_mtx);
                return 0;
        }
@@ -1051,12 +1141,9 @@ void iwl_opmode_deregister(const char *name)
                iwlwifi_opmode_table[i].ops = NULL;
 
                /* call the stop routine for all devices */
-               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
-                       if (drv->op_mode) {
-                               iwl_op_mode_stop(drv->op_mode);
-                               drv->op_mode = NULL;
-                       }
-               }
+               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+                       _iwl_op_mode_stop(drv);
+
                mutex_unlock(&iwlwifi_opmode_table_mtx);
                return;
        }
@@ -1076,6 +1163,14 @@ static int __init iwl_drv_init(void)
        pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
        pr_info(DRV_COPYRIGHT "\n");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* Create the root of iwlwifi debugfs subsystem. */
+       iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+       if (!iwl_dbgfs_root)
+               return -EFAULT;
+#endif
+
        return iwl_pci_register_driver();
 }
 module_init(iwl_drv_init);
@@ -1083,6 +1178,10 @@ module_init(iwl_drv_init);
 static void __exit iwl_drv_exit(void)
 {
        iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
 }
 module_exit(iwl_drv_exit);
 
index 2cbf137b25bf7b55ddddf1d4d9792b6d1c03fba9..285de5f68c051e39c9244188f234639dd8115328 100644 (file)
@@ -90,9 +90,9 @@
  * 4) The bus specific component configures the bus
  * 5) The bus specific component calls to the drv bus agnostic part
  *    (iwl_drv_start)
- * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback
- * 7) iwl_ucode_callback parses the fw file
- * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
+ * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw
  */
 
 struct iwl_drv;
index 9c07c670a1ce7edf617dd88ea403041dcf481e31..a5e425718f56a052e365dc0e2f11657b2af88bd8 100644 (file)
@@ -85,8 +85,6 @@ struct iwl_eeprom_data {
        int n_hw_addrs;
        u8 hw_addr[ETH_ALEN];
 
-       u16 radio_config;
-
        u8 calib_version;
        __le16 calib_voltage;
 
index 2153e4cc5572ede3f15c1bb8a7eb5c3f279b24a1..d1a86b66bc51ee6fab8e797e8aa458b9db3812ea 100644 (file)
@@ -124,8 +124,7 @@ struct iwl_ucode_capabilities {
 
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
 struct fw_desc {
-       dma_addr_t p_addr;      /* hardware address */
-       void *v_addr;           /* software address */
+       const void *data;       /* vmalloc'ed data */
        u32 len;                /* size in bytes */
        u32 offset;             /* offset in the device */
 };
index 64886f95664f996b370f621c97c14998b0c27171..c8d9b951746827b6ebd821eb0e72e2aef0c13a9c 100644 (file)
@@ -134,7 +134,8 @@ struct iwl_cfg;
 struct iwl_op_mode_ops {
        struct iwl_op_mode *(*start)(struct iwl_trans *trans,
                                     const struct iwl_cfg *cfg,
-                                    const struct iwl_fw *fw);
+                                    const struct iwl_fw *fw,
+                                    struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
        int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
index 92576a3e84ef1c8cf04e6881b5b2691924a89b09..ff1154232885da82add146706ffe67b325d3e9b2 100644 (file)
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
  * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't want for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *     response.
+ *     response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
+ *     response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
+ *     copied. The pointer passed to the response handler is in the transport
+ *     ownership and don't need to be freed by the op_mode. This also means
+ *     that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
        CMD_SYNC = 0,
        CMD_ASYNC = BIT(0),
        CMD_WANT_SKB = BIT(1),
-       CMD_ON_DEMAND = BIT(2),
+       CMD_WANT_HCMD = BIT(2),
+       CMD_ON_DEMAND = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@ struct iwl_trans {
        size_t dev_cmd_headroom;
        char dev_cmd_pool_name[50];
 
+       struct dentry *dbgfs_dir;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
index f4c3500b68c682ced4ecec9f21c89c1d97cf7732..2a4675396707474fc0a0ced64009eddb967b4bec 100644 (file)
@@ -263,8 +263,6 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
 
-#ifndef CONFIG_IWLWIFI_IDI
-
 static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -282,8 +280,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!trans_pcie->drv)
                goto out_free_trans;
 
+       /* register transport layer debugfs here */
+       if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+               goto out_free_drv;
+
        return 0;
 
+out_free_drv:
+       iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
        iwl_trans_pcie_free(iwl_trans);
        pci_set_drvdata(pdev, NULL);
@@ -301,8 +305,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
-#endif /* CONFIG_IWLWIFI_IDI */
-
 #ifdef CONFIG_PM_SLEEP
 
 static int iwl_pci_suspend(struct device *device)
@@ -347,15 +349,6 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
 
 #endif
 
-#ifdef CONFIG_IWLWIFI_IDI
-/*
- * Defined externally in iwl-idi.c
- */
-int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-void __devexit iwl_pci_remove(struct pci_dev *pdev);
-
-#endif /* CONFIG_IWLWIFI_IDI */
-
 static struct pci_driver iwl_pci_driver = {
        .name = DRV_NAME,
        .id_table = iwl_hw_card_ids,
index 4ffc18dc3a5761cc6b63c5aa60691727459e5e27..401178f44a3b130a9de6832bf10bd3e9b6e4b529 100644 (file)
@@ -184,6 +184,7 @@ struct iwl_queue {
 
 struct iwl_pcie_tx_queue_entry {
        struct iwl_device_cmd *cmd;
+       struct iwl_device_cmd *copy_cmd;
        struct sk_buff *skb;
        struct iwl_cmd_meta meta;
 };
@@ -310,7 +311,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 ******************************************************/
 void iwl_bg_rx_replenish(struct work_struct *data);
 void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_replenish(struct iwl_trans *trans);
 void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
                                   struct iwl_rx_queue *q);
 
index d1a61ba6247ab68a13e4cd8943959c92d0fa3452..17c8e5d82681022383d657da550924d48d0d9289 100644 (file)
 #include "internal.h"
 #include "iwl-op-mode.h"
 
-#ifdef CONFIG_IWLWIFI_IDI
-#include "iwl-amfh.h"
-#endif
-
 /******************************************************************************
  *
  * RX path functions
@@ -181,15 +177,15 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
 }
 
 /**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 {
        return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
 /**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
  *
  * If there are slots in the RX queue that need to be restocked,
  * and we have free pre-allocated buffers, fill the ranks as much
@@ -199,7 +195,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_rx_queue_restock(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -207,6 +203,17 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;
 
+       /*
+        * If the device isn't enabled - no need to try to add buffers...
+        * This can happen when we stop the device and still have an interrupt
+        * pending. We stop the APM before we sync the interrupts / tasklets
+        * because we have to (see comment there). On the other hand, since
+        * the APM is stopped, we cannot access the HW (in particular not prph).
+        * So don't try to restock if the APM has been already stopped.
+        */
+       if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+               return;
+
        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
@@ -219,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
                list_del(element);
 
                /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
+               rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
@@ -230,7 +237,6 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);
 
-
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
@@ -241,15 +247,16 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
        }
 }
 
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
+/*
+ * iwl_rx_allocate - allocate a page for each used RBD
  *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * A used RBD is an Rx buffer that has been given to the stack. To use it again
+ * a page must be allocated and the RBD must point to the page. This function
+ * doesn't change the HW pointer but handles the list of pages that is used by
+ * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * allocated buffers.
  */
-static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -328,23 +335,31 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
        }
 }
 
-void iwlagn_rx_replenish(struct iwl_trans *trans)
+/*
+ * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ *
+ * When moving to rx_free a page is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+void iwl_rx_replenish(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;
 
-       iwlagn_rx_allocate(trans, GFP_KERNEL);
+       iwl_rx_allocate(trans, GFP_KERNEL);
 
        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-       iwlagn_rx_queue_restock(trans);
+       iwl_rx_queue_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_rx_replenish_now(struct iwl_trans *trans)
 {
-       iwlagn_rx_allocate(trans, GFP_ATOMIC);
+       iwl_rx_allocate(trans, GFP_ATOMIC);
 
-       iwlagn_rx_queue_restock(trans);
+       iwl_rx_queue_restock(trans);
 }
 
 void iwl_bg_rx_replenish(struct work_struct *data)
@@ -352,7 +367,7 @@ void iwl_bg_rx_replenish(struct work_struct *data)
        struct iwl_trans_pcie *trans_pcie =
            container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-       iwlagn_rx_replenish(trans_pcie->trans);
+       iwl_rx_replenish(trans_pcie->trans);
 }
 
 static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
@@ -421,13 +436,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
+               if (reclaim) {
+                       struct iwl_pcie_tx_queue_entry *ent;
+                       ent = &txq->entries[cmd_index];
+                       cmd = ent->copy_cmd;
+                       WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
+               } else {
                        cmd = NULL;
+               }
 
                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
+               if (reclaim) {
+                       /* The original command isn't needed any more */
+                       kfree(txq->entries[cmd_index].copy_cmd);
+                       txq->entries[cmd_index].copy_cmd = NULL;
+               }
+
                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
@@ -520,7 +545,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
-                               iwlagn_rx_replenish_now(trans);
+                               iwl_rx_replenish_now(trans);
                                count = 0;
                        }
                }
@@ -529,9 +554,9 @@ static void iwl_rx_handle(struct iwl_trans *trans)
        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
-               iwlagn_rx_replenish_now(trans);
+               iwl_rx_replenish_now(trans);
        else
-               iwlagn_rx_queue_restock(trans);
+               iwl_rx_queue_restock(trans);
 }
 
 /**
@@ -713,11 +738,9 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                            CSR_INT_PERIODIC_DIS);
-#ifdef CONFIG_IWLWIFI_IDI
-               iwl_amfh_rx_handler();
-#else
+
                iwl_rx_handle(trans);
-#endif
+
                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
index 063ecaff5b569962e3dc2bf3114368385a575629..fe0fffd043048f48adca5c110031f537d363d73e 100644 (file)
@@ -216,7 +216,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);
 
-       iwlagn_rx_replenish(trans);
+       iwl_rx_replenish(trans);
 
        iwl_trans_rx_hw_init(trans, rxq);
 
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
        iwl_tx_queue_unmap(trans, txq_id);
 
        /* De-alloc array of command/tx buffers */
-
        if (txq_id == trans_pcie->cmd_queue)
-               for (i = 0; i < txq->q.n_window; i++)
+               for (i = 0; i < txq->q.n_window; i++) {
                        kfree(txq->entries[i].cmd);
+                       kfree(txq->entries[i].copy_cmd);
+               }
 
        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
@@ -851,10 +852,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
 
        iwl_op_mode_nic_config(trans->op_mode);
 
-#ifndef CONFIG_IWLWIFI_IDI
        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_rx_init(trans);
-#endif
 
        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_tx_init(trans))
@@ -893,6 +892,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
 static int iwl_prepare_card_hw(struct iwl_trans *trans)
 {
        int ret;
+       int t = 0;
 
        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -905,30 +905,25 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PREPARE);
 
-       ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-                          ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-                          CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+       do {
+               ret = iwl_set_hw_ready(trans);
+               if (ret >= 0)
+                       return 0;
 
-       if (ret < 0)
-               return ret;
+               usleep_range(200, 1000);
+               t += 200;
+       } while (t < 150000);
 
-       /* HW should be ready by now, check again. */
-       ret = iwl_set_hw_ready(trans);
-       if (ret >= 0)
-               return 0;
        return ret;
 }
 
 /*
  * ucode
  */
-static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
-                           const struct fw_desc *section)
+static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+                                  dma_addr_t phy_addr, u32 byte_cnt)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       dma_addr_t phy_addr = section->p_addr;
-       u32 byte_cnt = section->len;
-       u32 dst_addr = section->offset;
        int ret;
 
        trans_pcie->ucode_write_complete = false;
@@ -942,8 +937,8 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
                           dst_addr);
 
        iwl_write_direct32(trans,
-               FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
-               phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+                          FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+                          phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
 
        iwl_write_direct32(trans,
                           FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
@@ -962,33 +957,64 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
 
-       IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
-                    section_num);
        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
-               IWL_ERR(trans, "Could not load the [%d] uCode section\n",
-                       section_num);
+               IWL_ERR(trans, "Failed to load firmware chunk!\n");
                return -ETIMEDOUT;
        }
 
        return 0;
 }
 
-static int iwl_load_given_ucode(struct iwl_trans *trans,
-                               const struct fw_img *image)
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+                           const struct fw_desc *section)
 {
+       u8 *v_addr;
+       dma_addr_t p_addr;
+       u32 offset;
        int ret = 0;
-               int i;
 
-               for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
-                       if (!image->sec[i].p_addr)
-                               break;
+       IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+                    section_num);
+
+       v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
+       if (!v_addr)
+               return -ENOMEM;
+
+       for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
+               u32 copy_size;
+
+               copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
 
-                       ret = iwl_load_section(trans, i, &image->sec[i]);
-                       if (ret)
-                               return ret;
+               memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+               ret = iwl_load_firmware_chunk(trans, section->offset + offset,
+                                             p_addr, copy_size);
+               if (ret) {
+                       IWL_ERR(trans,
+                               "Could not load the [%d] uCode section\n",
+                               section_num);
+                       break;
                }
+       }
+
+       dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
+       return ret;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+                               const struct fw_img *image)
+{
+       int i, ret = 0;
+
+       for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+               if (!image->sec[i].data)
+                       break;
+
+               ret = iwl_load_section(trans, i, &image->sec[i]);
+               if (ret)
+                       return ret;
+       }
 
        /* Remove all resets to allow NIC to operate */
        iwl_write32(trans, CSR_RESET, 0);
@@ -1181,9 +1207,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
                iwl_trans_tx_stop(trans);
-#ifndef CONFIG_IWLWIFI_IDI
                iwl_trans_rx_stop(trans);
-#endif
+
                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(trans, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
@@ -1454,14 +1479,16 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
        bool hw_rfkill;
        unsigned long flags;
 
+       spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+       iwl_disable_interrupts(trans);
+       spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
        iwl_apm_stop(trans);
 
        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
-       iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
        if (!op_mode_leaving) {
                /*
                 * Even if we stop the HW, we still want the RF kill
@@ -1549,9 +1576,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        iwl_trans_pcie_tx_free(trans);
-#ifndef CONFIG_IWLWIFI_IDI
        iwl_trans_pcie_rx_free(trans);
-#endif
+
        if (trans_pcie->irq_requested == true) {
                free_irq(trans_pcie->irq, trans);
                iwl_free_isr_ict(trans);
@@ -1769,7 +1795,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
 #define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
        if (!debugfs_create_file(#name, mode, parent, trans,            \
                                 &iwl_dbgfs_##name##_ops))              \
-               return -ENOMEM;                                         \
+               goto err;                                               \
 } while (0)
 
 /* file operation */
@@ -2033,6 +2059,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
        return 0;
+
+err:
+       IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+       return -ENOMEM;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
index 6baf8deef5190abef3fa85a7854b8396530cd97f..105e3af3c621b0b9e335fbe42d1ea1e444bfa579 100644 (file)
@@ -521,12 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        u16 copy_size, cmd_size;
        bool had_nocopy = false;
        int i;
-       u8 *cmd_dest;
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-       const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
-       int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
-       int trace_idx;
-#endif
+       u32 cmd_pos;
 
        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);
@@ -584,15 +579,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
                                         INDEX_TO_SEQ(q->write_ptr));
 
        /* and copy the data that needs to be copied */
-
-       cmd_dest = out_cmd->payload;
+       cmd_pos = offsetof(struct iwl_device_cmd, payload);
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
-               memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
-               cmd_dest += cmd->len[i];
+               memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
+               cmd_pos += cmd->len[i];
+       }
+
+       WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+
+       /*
+        * since out_cmd will be the source address of the FH, it will write
+        * the retry count there. So when the user needs to receive the HCMD
+        * that corresponds to the response in the response handler, it needs
+        * to set CMD_WANT_HCMD.
+        */
+       if (cmd->flags & CMD_WANT_HCMD) {
+               txq->entries[idx].copy_cmd =
+                       kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
+               if (unlikely(!txq->entries[idx].copy_cmd)) {
+                       idx = -ENOMEM;
+                       goto out;
+               }
        }
 
        IWL_DEBUG_HC(trans,
@@ -612,11 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        dma_unmap_len_set(out_meta, len, copy_size);
 
        iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-       trace_bufs[0] = &out_cmd->hdr;
-       trace_lens[0] = copy_size;
-       trace_idx = 1;
-#endif
 
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
@@ -635,25 +641,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
                iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
                                             cmd->len[i], 0);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-               trace_bufs[trace_idx] = cmd->data[i];
-               trace_lens[trace_idx] = cmd->len[i];
-               trace_idx++;
-#endif
        }
 
        out_meta->flags = cmd->flags;
 
        txq->need_update = 1;
 
-       /* check that tracing gets all possible blocks */
-       BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-       trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
-                              trace_bufs[0], trace_lens[0],
-                              trace_bufs[1], trace_lens[1],
-                              trace_bufs[2], trace_lens[2]);
-#endif
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
+                              &out_cmd->hdr, copy_size);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
index 26e68326710b04bce2a9e4e08f42ddf1ba645ce2..aaa297315c47102df73b6fd3948e4b8fd4cbba25 100644 (file)
@@ -1159,6 +1159,22 @@ void lbs_set_mac_control(struct lbs_private *priv)
        lbs_deb_leave(LBS_DEB_CMD);
 }
 
+int lbs_set_mac_control_sync(struct lbs_private *priv)
+{
+       struct cmd_ds_mac_control cmd;
+       int ret = 0;
+
+       lbs_deb_enter(LBS_DEB_CMD);
+
+       cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+       cmd.action = cpu_to_le16(priv->mac_control);
+       cmd.reserved = 0;
+       ret = lbs_cmd_with_response(priv, CMD_MAC_CONTROL, &cmd);
+
+       lbs_deb_leave(LBS_DEB_CMD);
+       return ret;
+}
+
 /**
  *  lbs_allocate_cmd_buffer - allocates the command buffer and links
  *  it to command free queue
index ab07608e13d07031bda8f3a7d6f712b1a01b73ac..4279e8ab95f2aa4545cef71daa5649b1314fdf3b 100644 (file)
@@ -96,6 +96,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv);
 int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
 
 void lbs_set_mac_control(struct lbs_private *priv);
+int lbs_set_mac_control_sync(struct lbs_private *priv);
 
 int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
                     s16 *maxlevel);
index fe1ea43c5149ef03e9ed0e2809695773fa2a576d..0c02f0483d1fd65e8b2a61b55ab57cbcc95ad5f2 100644 (file)
@@ -682,8 +682,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
 
        /* Send cmd to FW to enable 11D function */
        ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+       if (ret)
+               goto done;
 
-       lbs_set_mac_control(priv);
+       ret = lbs_set_mac_control_sync(priv);
 done:
        lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
        return ret;
index a03457292c88c1fd5f96eb4ad5a60df671227d91..7001856241e60354e9ff5cbed505ba6fc8bac415 100644 (file)
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
        lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct lbtf_private *priv = hw->priv;
 
index 00838395778cb4c98854c93de0e39e2d392bbadc..429ca3215fdbf3e0d48f82e3c1eb69381938fd46 100644 (file)
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
-static u32 wmediumd_pid;
+static u32 wmediumd_portid;
 
 static int radios = 2;
 module_param(radios, int, 0444);
@@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
 
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                                       struct sk_buff *my_skb,
-                                      int dst_pid)
+                                      int dst_portid)
 {
        struct sk_buff *skb;
        struct mac80211_hwsim_data *data = hw->priv;
@@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
-       genlmsg_unicast(&init_net, skb, dst_pid);
+       genlmsg_unicast(&init_net, skb, dst_portid);
 
        /* Enqueue the packet */
        skb_queue_tail(&data->pending, my_skb);
@@ -709,11 +709,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
        return ack;
 }
 
-static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+                             struct ieee80211_tx_control *control,
+                             struct sk_buff *skb)
 {
        bool ack;
        struct ieee80211_tx_info *txi;
-       u32 _pid;
+       u32 _portid;
 
        mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -724,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
        /* NO wmediumd detected, perfect medium simulation */
        ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
@@ -812,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
        struct ieee80211_hw *hw = arg;
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
-       u32 _pid;
+       u32 _portid;
 
        hwsim_check_magic(vif);
 
@@ -829,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
        mac80211_hwsim_monitor_rx(hw, skb);
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
        mac80211_hwsim_tx_frame_no_nl(hw, skb);
        dev_kfree_skb(skb);
@@ -1313,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
        struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
        struct sk_buff *skb;
        struct ieee80211_pspoll *pspoll;
-       u32 _pid;
+       u32 _portid;
 
        if (!vp->assoc)
                return;
@@ -1334,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
        memcpy(pspoll->ta, mac, ETH_ALEN);
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
 
        if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
                printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
@@ -1351,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
-       u32 _pid;
+       u32 _portid;
 
        if (!vp->assoc)
                return;
@@ -1373,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
 
        if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
                printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
@@ -1630,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
        if (info == NULL)
                goto out;
 
-       wmediumd_pid = info->snd_pid;
+       wmediumd_portid = info->snd_portid;
 
        printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
-              "switching to wmediumd mode with pid %d\n", info->snd_pid);
+              "switching to wmediumd mode with pid %d\n", info->snd_portid);
 
        return 0;
 out:
@@ -1670,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
        if (state != NETLINK_URELEASE)
                return NOTIFY_DONE;
 
-       if (notify->pid == wmediumd_pid) {
+       if (notify->portid == wmediumd_portid) {
                printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
                       " socket, switching to perfect channel medium\n");
-               wmediumd_pid = 0;
+               wmediumd_portid = 0;
        }
        return NOTIFY_DONE;
 
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
 #endif
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
+       { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
                        BIT(NL80211_IFTYPE_P2P_GO) |
                        BIT(NL80211_IFTYPE_ADHOC) |
-                       BIT(NL80211_IFTYPE_MESH_POINT);
+                       BIT(NL80211_IFTYPE_MESH_POINT) |
+                       BIT(NL80211_IFTYPE_P2P_DEVICE);
 
                hw->flags = IEEE80211_HW_MFP_CAPABLE |
                            IEEE80211_HW_SIGNAL_DBM |
@@ -2052,7 +2056,7 @@ failed:
        mac80211_hwsim_free();
        return err;
 }
-
+module_init(init_mac80211_hwsim);
 
 static void __exit exit_mac80211_hwsim(void)
 {
@@ -2063,7 +2067,4 @@ static void __exit exit_mac80211_hwsim(void)
        mac80211_hwsim_free();
        unregister_netdev(hwsim_mon);
 }
-
-
-module_init(init_mac80211_hwsim);
 module_exit(exit_mac80211_hwsim);
index e535c937628b4575d87ebb8435c7362367634c73..245a371f1a43a4746bda49912a4a2d656d5cf75e 100644 (file)
@@ -175,23 +175,6 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
        return 0;
 }
 
-/*
- * This function handles the command response of 11n configuration request.
- *
- * Handling includes changing the header fields into CPU format.
- */
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
-                       struct mwifiex_ds_11n_tx_cfg *tx_cfg)
-{
-       struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
-
-       if (tx_cfg) {
-               tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
-               tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
-       }
-       return 0;
-}
-
 /*
  * This function prepares command of reconfigure Tx buffer.
  *
@@ -257,27 +240,6 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
        return 0;
 }
 
-/*
- * This function handles the command response of AMSDU aggregation
- * control request.
- *
- * Handling includes changing the header fields into CPU format.
- */
-int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
-                               struct mwifiex_ds_11n_amsdu_aggr_ctrl
-                               *amsdu_aggr_ctrl)
-{
-       struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
-               &resp->params.amsdu_aggr_ctrl;
-
-       if (amsdu_aggr_ctrl) {
-               amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
-               amsdu_aggr_ctrl->curr_buf_size =
-                       le16_to_cpu(amsdu_ctrl->curr_buf_size);
-       }
-       return 0;
-}
-
 /*
  * This function prepares 11n configuration command.
  *
@@ -726,3 +688,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
 
        return count;
 }
+
+/*
+ * This function retrieves the entry for specific tx BA stream table by RA and
+ * deletes it.
+ */
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
+{
+       struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
+       unsigned long flags;
+
+       if (!ra)
+               return;
+
+       spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+       list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
+               if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
+                                              flags);
+                       mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+
+       return;
+}
index 28366e9211fbb8773f1eae98441401aea6473ca1..46006a54a6566ee1c744220926bc198159aac0dd 100644 (file)
@@ -28,8 +28,6 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
                          struct host_cmd_ds_command *resp);
 int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
                              struct host_cmd_ds_command *resp);
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
-                       struct mwifiex_ds_11n_tx_cfg *tx_cfg);
 int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
                        struct mwifiex_ds_11n_tx_cfg *txcfg);
 
@@ -60,15 +58,13 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
                              struct mwifiex_ds_rx_reorder_tbl *buf);
 int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
                               struct mwifiex_ds_tx_ba_stream_tbl *buf);
-int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
-                               struct mwifiex_ds_11n_amsdu_aggr_ctrl
-                               *amsdu_aggr_ctrl);
 int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
                             struct host_cmd_ds_command *cmd,
                             int cmd_action, u16 *buf_size);
 int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
                                int cmd_action,
                                struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
 
 /*
  * This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +153,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
 
        return false;
 }
+
+/*
+ * This function checks whether associated station is 11n enabled
+ */
+static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
+                                            struct mwifiex_sta_node *node)
+{
+
+       if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
+           !priv->ap_11n_enabled)
+               return 0;
+
+       return node->is_11n_enabled;
+}
 #endif /* !_MWIFIEX_11N_H_ */
index ab84eb94374905c166d6bdcd6c745db1992574fe..395f1bfd41027f788901b62b5ef4621ac019956d 100644 (file)
@@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
        };
        struct tx_packet_hdr *tx_header;
 
-       skb_put(skb_aggr, sizeof(*tx_header));
-
-       tx_header = (struct tx_packet_hdr *) skb_aggr->data;
+       tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
 
        /* Copy DA and SA */
        dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
        tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
 
        /* Add payload */
-       skb_put(skb_aggr, skb_src->len);
-       memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
-              skb_src->len);
-       *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
-                                                     LLC_SNAP_LEN)) & 3)) : 0;
-       skb_put(skb_aggr, *pad);
+       memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
+
+       /* Add padding for new MSDU to start from 4 byte boundary */
+       *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
 
        return skb_aggr->len + *pad;
 }
index 591ccd33f83c5482340c6667cf76660e24ac00d7..9402b93b9a363b350545fb24aad6f67f06afc172 100644 (file)
@@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
                        tbl->rx_reorder_ptr[i] = NULL;
                }
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               if (rx_tmp_ptr)
-                       mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+               if (rx_tmp_ptr) {
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+                       else
+                               mwifiex_process_rx_packet(priv->adapter,
+                                                         rx_tmp_ptr);
+               }
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+
+               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                       mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+               else
+                       mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
  */
-static struct mwifiex_rx_reorder_tbl *
+struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
        struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
        return NULL;
 }
 
+/* This function retrieves the pointer to an entry in Rx reordering
+ * table which matches the given TA and deletes it.
+ */
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+{
+       struct mwifiex_rx_reorder_tbl *tbl, *tmp;
+       unsigned long flags;
+
+       if (!ta)
+               return;
+
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
+                       mwifiex_del_rx_reorder_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+       return;
+}
+
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        struct mwifiex_rx_reorder_tbl *tbl, *new_node;
        u16 last_seq = 0;
        unsigned long flags;
+       struct mwifiex_sta_node *node;
 
        /*
         * If we get a TID, ta pair which is already present dispatch all the
@@ -248,19 +283,26 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        new_node->tid = tid;
        memcpy(new_node->ta, ta, ETH_ALEN);
        new_node->start_win = seq_num;
-       if (mwifiex_queuing_ra_based(priv))
-               /* TODO for adhoc */
+
+       if (mwifiex_queuing_ra_based(priv)) {
                dev_dbg(priv->adapter->dev,
-                       "info: ADHOC:last_seq=%d start_win=%d\n",
+                       "info: AP/ADHOC:last_seq=%d start_win=%d\n",
                        last_seq, new_node->start_win);
-       else
+               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
+                       node = mwifiex_get_sta_entry(priv, ta);
+                       if (node)
+                               last_seq = node->rx_seq[tid];
+               }
+       } else {
                last_seq = priv->rx_seq[tid];
+       }
 
        if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
            last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
+       new_node->flags = 0;
 
        new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
                                        GFP_KERNEL);
@@ -396,8 +438,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
-               if (pkt_type != PKT_TYPE_BAR)
-                       mwifiex_process_rx_packet(priv->adapter, payload);
+               if (pkt_type != PKT_TYPE_BAR) {
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               mwifiex_handle_uap_rx_forward(priv, payload);
+                       else
+                               mwifiex_process_rx_packet(priv->adapter,
+                                                         payload);
+               }
                return 0;
        }
        start_win = tbl->start_win;
@@ -411,13 +458,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
         * If seq_num is less then starting win then ignore and drop the
         * packet
         */
-       if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */
-               if (seq_num >= ((start_win + TWOPOW11) &
-                               (MAX_TID_VALUE - 1)) && (seq_num < start_win))
+       if (tbl->flags & RXREOR_FORCE_NO_DROP) {
+               dev_dbg(priv->adapter->dev,
+                       "RXREOR_FORCE_NO_DROP when HS is activated\n");
+               tbl->flags &= ~RXREOR_FORCE_NO_DROP;
+       } else {
+               if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
+                       if (seq_num >= ((start_win + TWOPOW11) &
+                                       (MAX_TID_VALUE - 1)) &&
+                           seq_num < start_win)
+                               return -1;
+               } else if ((seq_num < start_win) ||
+                          (seq_num > (start_win + TWOPOW11))) {
                        return -1;
-       } else if ((seq_num < start_win) ||
-                  (seq_num > (start_win + TWOPOW11))) {
-               return -1;
+               }
        }
 
        /*
@@ -428,8 +482,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
 
        if (((end_win < start_win) &&
-            (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) &&
-            (seq_num > end_win)) ||
+            (seq_num < start_win) && (seq_num > end_win)) ||
            ((end_win > start_win) && ((seq_num > end_win) ||
                                       (seq_num < start_win)))) {
                end_win = seq_num;
@@ -591,3 +644,29 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
        mwifiex_reset_11n_rx_seq_num(priv);
 }
+
+/*
+ * This function updates all rx_reorder_tbl's flags.
+ */
+void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
+{
+       struct mwifiex_private *priv;
+       struct mwifiex_rx_reorder_tbl *tbl;
+       unsigned long lock_flags;
+       int i;
+
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               if (!priv)
+                       continue;
+               if (list_empty(&priv->rx_reorder_tbl_ptr))
+                       continue;
+
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+               list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+                       tbl->flags = flags;
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+       }
+
+       return;
+}
index 6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a..4064041ac852737602b8970eff8c40f4070c9117 100644 (file)
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
 #define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+#define BA_SETUP_MAX_PACKET_THRESHOLD  16
+#define BA_SETUP_PACKET_OFFSET         16
+
+enum mwifiex_rxreor_flags {
+       RXREOR_FORCE_NO_DROP    = 1<<0,
+};
 
 static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
 {
@@ -68,5 +74,9 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
                                                           mwifiex_private
                                                           *priv, int tid,
                                                           u8 *ta);
+struct mwifiex_rx_reorder_tbl *
+mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
+void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
 
 #endif /* _MWIFIEX_11N_RXREORDER_H_ */
index 3f66ebb0a630813d3bc836c4412233a3f8883f7b..dd0410d2d465d8e279ec1104ea5ee51831fed97c 100644 (file)
@@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o
 mwifiex-y += ie.o
 mwifiex-y += sta_cmdresp.o
 mwifiex-y += sta_event.o
+mwifiex-y += uap_event.o
 mwifiex-y += sta_tx.o
 mwifiex-y += sta_rx.o
+mwifiex-y += uap_txrx.o
 mwifiex-y += cfg80211.o
 mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_MWIFIEX) += mwifiex.o
index fe42137384da0bbae54ee0e656bf51a50cb58e68..2691620393eae14f771f89641124ce9ff5b354c4 100644 (file)
@@ -22,7 +22,7 @@
 
 static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        {
-               .max = 1, .types = BIT(NL80211_IFTYPE_STATION),
+               .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
                .max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -37,6 +37,36 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
        .beacon_int_infra_match = true,
 };
 
+static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
+       .n_reg_rules = 7,
+       .alpha2 =  "99",
+       .reg_rules = {
+               /* Channel 1 - 11 */
+               REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
+               /* Channel 12 - 13 */
+               REG_RULE(2467-10, 2472+10, 20, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+               /* Channel 14 */
+               REG_RULE(2484-10, 2484+10, 20, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+                        NL80211_RRF_NO_OFDM),
+               /* Channel 36 - 48 */
+               REG_RULE(5180-10, 5240+10, 40, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+               /* Channel 149 - 165 */
+               REG_RULE(5745-10, 5825+10, 40, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+               /* Channel 52 - 64 */
+               REG_RULE(5260-10, 5320+10, 40, 3, 30,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+                        NL80211_RRF_DFS),
+               /* Channel 100 - 140 */
+               REG_RULE(5500-10, 5700+10, 40, 3, 30,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+                        NL80211_RRF_DFS),
+       }
+};
+
 /*
  * This function maps the nl802.11 channel type into driver channel type.
  *
@@ -47,8 +77,7 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
  *      NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
  *      Others                 -> IEEE80211_HT_PARAM_CHA_SEC_NONE
  */
-static u8
-mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
+u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
 {
        switch (chan_type) {
        case NL80211_CHAN_NO_HT:
@@ -99,7 +128,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
        const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
 
-       if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
+       if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
                wiphy_err(wiphy, "deleting the crypto keys\n");
                return -EFAULT;
        }
@@ -108,6 +137,188 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
        return 0;
 }
 
+/*
+ * This function forms an skb for management frame.
+ */
+static int
+mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
+{
+       u8 addr[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       u16 pkt_len;
+       u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
+       struct timeval tv;
+
+       pkt_len = len + ETH_ALEN;
+
+       skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
+                   MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+       memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
+
+       memcpy(skb_push(skb, sizeof(tx_control)),
+              &tx_control, sizeof(tx_control));
+
+       memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
+
+       /* Add packet data and address4 */
+       memcpy(skb_put(skb, sizeof(struct ieee80211_hdr_3addr)), buf,
+              sizeof(struct ieee80211_hdr_3addr));
+       memcpy(skb_put(skb, ETH_ALEN), addr, ETH_ALEN);
+       memcpy(skb_put(skb, len - sizeof(struct ieee80211_hdr_3addr)),
+              buf + sizeof(struct ieee80211_hdr_3addr),
+              len - sizeof(struct ieee80211_hdr_3addr));
+
+       skb->priority = LOW_PRIO_TID;
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+
+       return 0;
+}
+
+/*
+ * CFG802.11 operation handler to transmit a management frame.
+ */
+static int
+mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+                        struct ieee80211_channel *chan, bool offchan,
+                        enum nl80211_channel_type channel_type,
+                        bool channel_type_valid, unsigned int wait,
+                        const u8 *buf, size_t len, bool no_cck,
+                        bool dont_wait_for_ack, u64 *cookie)
+{
+       struct sk_buff *skb;
+       u16 pkt_len;
+       const struct ieee80211_mgmt *mgmt;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+
+       if (!buf || !len) {
+               wiphy_err(wiphy, "invalid buffer and length\n");
+               return -EFAULT;
+       }
+
+       mgmt = (const struct ieee80211_mgmt *)buf;
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA &&
+           ieee80211_is_probe_resp(mgmt->frame_control)) {
+               /* Since we support offload probe resp, we need to skip probe
+                * resp in AP or GO mode */
+               wiphy_dbg(wiphy,
+                         "info: skip to send probe resp in AP or GO mode\n");
+               return 0;
+       }
+
+       pkt_len = len + ETH_ALEN;
+       skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
+                           MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+                           pkt_len + sizeof(pkt_len));
+
+       if (!skb) {
+               wiphy_err(wiphy, "allocate skb failed for management frame\n");
+               return -ENOMEM;
+       }
+
+       mwifiex_form_mgmt_frame(skb, buf, len);
+       mwifiex_queue_tx_pkt(priv, skb);
+
+       *cookie = random32() | 1;
+       cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
+
+       wiphy_dbg(wiphy, "info: management frame transmitted\n");
+       return 0;
+}
+
+/*
+ * CFG802.11 operation handler to register a mgmt frame.
+ */
+static void
+mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+                                    struct wireless_dev *wdev,
+                                    u16 frame_type, bool reg)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+
+       if (reg)
+               priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+       else
+               priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
+
+       mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+                              HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+
+       wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+}
+
+/*
+ * CFG802.11 operation handler to remain on channel.
+ */
+static int
+mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev,
+                                  struct ieee80211_channel *chan,
+                                  enum nl80211_channel_type channel_type,
+                                  unsigned int duration, u64 *cookie)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+       int ret;
+
+       if (!chan || !cookie) {
+               wiphy_err(wiphy, "Invalid parameter for ROC\n");
+               return -EINVAL;
+       }
+
+       if (priv->roc_cfg.cookie) {
+               wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n",
+                         priv->roc_cfg.cookie);
+               return -EBUSY;
+       }
+
+       ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
+                                        &channel_type, duration);
+
+       if (!ret) {
+               *cookie = random32() | 1;
+               priv->roc_cfg.cookie = *cookie;
+               priv->roc_cfg.chan = *chan;
+               priv->roc_cfg.chan_type = channel_type;
+
+               cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
+                                         duration, GFP_ATOMIC);
+
+               wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+       }
+
+       return ret;
+}
+
+/*
+ * CFG802.11 operation handler to cancel remain on channel.
+ */
+static int
+mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+                                         struct wireless_dev *wdev, u64 cookie)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+       int ret;
+
+       if (cookie != priv->roc_cfg.cookie)
+               return -ENOENT;
+
+       ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
+                                        &priv->roc_cfg.chan,
+                                        &priv->roc_cfg.chan_type, 0);
+
+       if (!ret) {
+               cfg80211_remain_on_channel_expired(wdev, cookie,
+                                                  &priv->roc_cfg.chan,
+                                                  priv->roc_cfg.chan_type,
+                                                  GFP_ATOMIC);
+
+               memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
+
+               wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+       }
+
+       return ret;
+}
+
 /*
  * CFG802.11 operation handler to set Tx power.
  */
@@ -171,7 +382,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
 
        if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
                priv->wep_key_curr_index = key_index;
-       } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+       } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
+                                     NULL, 0)) {
                wiphy_err(wiphy, "set default Tx key index\n");
                return -EFAULT;
        }
@@ -207,7 +419,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
                return 0;
        }
 
-       if (mwifiex_set_encode(priv, params->key, params->key_len,
+       if (mwifiex_set_encode(priv, params, params->key, params->key_len,
                               key_index, peer_mac, 0)) {
                wiphy_err(wiphy, "crypto keys added\n");
                return -EFAULT;
@@ -462,6 +674,76 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
        return 0;
 }
 
+static int
+mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
+{
+       u16 mode = P2P_MODE_DISABLE;
+
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
+               mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
+
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * This function initializes the functionalities for P2P client.
+ * The P2P client initialization sequence is:
+ * disable -> device -> client
+ */
+static int
+mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
+{
+       u16 mode;
+
+       if (mwifiex_cfg80211_deinit_p2p(priv))
+               return -1;
+
+       mode = P2P_MODE_DEVICE;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       mode = P2P_MODE_CLIENT;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * This function initializes the functionalities for P2P GO.
+ * The P2P GO initialization sequence is:
+ * disable -> device -> GO
+ */
+static int
+mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
+{
+       u16 mode;
+
+       if (mwifiex_cfg80211_deinit_p2p(priv))
+               return -1;
+
+       mode = P2P_MODE_DEVICE;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       mode = P2P_MODE_GO;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
+               mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP);
+
+       return 0;
+}
+
 /*
  * CFG802.11 operation handler to change interface type.
  */
@@ -494,6 +776,16 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                switch (type) {
                case NL80211_IFTYPE_ADHOC:
                        break;
+               case NL80211_IFTYPE_P2P_CLIENT:
+                       if (mwifiex_cfg80211_init_p2p_client(priv))
+                               return -EFAULT;
+                       dev->ieee80211_ptr->iftype = type;
+                       return 0;
+               case NL80211_IFTYPE_P2P_GO:
+                       if (mwifiex_cfg80211_init_p2p_go(priv))
+                               return -EFAULT;
+                       dev->ieee80211_ptr->iftype = type;
+                       return 0;
                case NL80211_IFTYPE_UNSPECIFIED:
                        wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
                case NL80211_IFTYPE_STATION:    /* This shouldn't happen */
@@ -519,6 +811,18 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                        return -EOPNOTSUPP;
                }
                break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               switch (type) {
+               case NL80211_IFTYPE_STATION:
+                       if (mwifiex_cfg80211_deinit_p2p(priv))
+                               return -EFAULT;
+                       dev->ieee80211_ptr->iftype = type;
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
        default:
                wiphy_err(wiphy, "%s: unknown iftype: %d\n",
                          dev->name, dev->ieee80211_ptr->iftype);
@@ -657,7 +961,6 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
 }
 
 /* Supported rates to be advertised to the cfg80211 */
-
 static struct ieee80211_rate mwifiex_rates[] = {
        {.bitrate = 10, .hw_value = 2, },
        {.bitrate = 20, .hw_value = 4, },
@@ -674,7 +977,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
 };
 
 /* Channel definitions to be advertised to cfg80211 */
-
 static struct ieee80211_channel mwifiex_channels_2ghz[] = {
        {.center_freq = 2412, .hw_value = 1, },
        {.center_freq = 2417, .hw_value = 2, },
@@ -742,12 +1044,41 @@ static struct ieee80211_supported_band mwifiex_band_5ghz = {
 
 
 /* Supported crypto cipher suits to be advertised to cfg80211 */
-
 static const u32 mwifiex_cipher_suites[] = {
        WLAN_CIPHER_SUITE_WEP40,
        WLAN_CIPHER_SUITE_WEP104,
        WLAN_CIPHER_SUITE_TKIP,
        WLAN_CIPHER_SUITE_CCMP,
+       WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* Supported mgmt frame types to be advertised to cfg80211 */
+static const struct ieee80211_txrx_stypes
+mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+       [NL80211_IFTYPE_STATION] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
+       [NL80211_IFTYPE_AP] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
+       [NL80211_IFTYPE_P2P_CLIENT] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
+       [NL80211_IFTYPE_P2P_GO] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
 };
 
 /*
@@ -842,7 +1173,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
-       if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
                wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
                return -EINVAL;
        }
@@ -906,6 +1237,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        if (mwifiex_del_mgmt_ies(priv))
                wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
 
+       priv->ap_11n_enabled = 0;
+
        if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -928,7 +1261,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        u8 config_bands = 0;
 
-       if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
                return -1;
        if (mwifiex_set_mgmt_ies(priv, &params->beacon))
                return -1;
@@ -965,15 +1298,18 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
        bss_cfg->channel =
            (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
-       bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
 
        /* Set appropriate bands */
        if (params->channel->band == IEEE80211_BAND_2GHZ) {
+               bss_cfg->band_cfg = BAND_CONFIG_BG;
+
                if (params->channel_type == NL80211_CHAN_NO_HT)
                        config_bands = BAND_B | BAND_G;
                else
                        config_bands = BAND_B | BAND_G | BAND_GN;
        } else {
+               bss_cfg->band_cfg = BAND_CONFIG_A;
+
                if (params->channel_type == NL80211_CHAN_NO_HT)
                        config_bands = BAND_A;
                else
@@ -984,6 +1320,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
              ~priv->adapter->fw_bands))
                priv->adapter->config_bands = config_bands;
 
+       mwifiex_set_uap_rates(bss_cfg, params);
        mwifiex_send_domain_info_cmd_fw(wiphy);
 
        if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
@@ -994,6 +1331,12 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
        mwifiex_set_ht_params(priv, bss_cfg, params);
 
+       if (params->inactivity_timeout > 0) {
+               /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
+               bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
+               bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
+       }
+
        if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1149,7 +1492,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
                              ~priv->adapter->fw_bands))
                                priv->adapter->config_bands = config_bands;
                }
-               mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
        }
 
        /* As this is new association, clear locally stored
@@ -1159,7 +1501,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
        priv->wep_key_curr_index = 0;
        priv->sec_info.encryption_mode = 0;
        priv->sec_info.is_authtype_auto = 0;
-       ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
+       ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
 
        if (mode == NL80211_IFTYPE_ADHOC) {
                /* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1548,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
                                "info: setting wep encryption"
                                " with key len %d\n", sme->key_len);
                        priv->wep_key_curr_index = sme->key_idx;
-                       ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
-                                                sme->key_idx, NULL, 0);
+                       ret = mwifiex_set_encode(priv, NULL, sme->key,
+                                                sme->key_len, sme->key_idx,
+                                                NULL, 0);
                }
        }
 done:
@@ -1459,11 +1802,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 {
        struct net_device *dev = request->wdev->netdev;
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-       int i;
+       int i, offset;
        struct ieee80211_channel *chan;
+       struct ieee_types_header *ie;
 
        wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
 
+       if (atomic_read(&priv->wmm.tx_pkts_queued) >=
+           MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
+               dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
+               return -EBUSY;
+       }
+
        priv->scan_request = request;
 
        priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
@@ -1477,13 +1827,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        priv->user_scan_cfg->ssid_list = request->ssids;
 
        if (request->ie && request->ie_len) {
+               offset = 0;
                for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
                        if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
                                continue;
                        priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
-                       memcpy(&priv->vs_ie[i].ie, request->ie,
-                              request->ie_len);
-                       break;
+                       ie = (struct ieee_types_header *)(request->ie + offset);
+                       memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
+                       offset += sizeof(*ie) + ie->len;
+
+                       if (offset >= request->ie_len)
+                               break;
                }
        }
 
@@ -1592,7 +1946,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
  *  create a new virtual interface with the given name
  */
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
-                                             char *name,
+                                             const char *name,
                                              enum nl80211_iftype type,
                                              u32 *flags,
                                              struct vif_params *params)
@@ -1632,7 +1986,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                priv->bss_type = MWIFIEX_BSS_TYPE_STA;
                priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
-               priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+               priv->bss_priority = 0;
                priv->bss_role = MWIFIEX_BSS_ROLE_STA;
                priv->bss_num = 0;
 
@@ -1655,12 +2009,47 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
                priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
-               priv->bss_priority = MWIFIEX_BSS_ROLE_UAP;
+               priv->bss_priority = 0;
                priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
                priv->bss_started = 0;
                priv->bss_num = 0;
                priv->bss_mode = type;
 
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P];
+
+               if (priv->bss_mode) {
+                       wiphy_err(wiphy, "Can't create multiple P2P ifaces");
+                       return ERR_PTR(-EINVAL);
+               }
+
+               wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+               if (!wdev)
+                       return ERR_PTR(-ENOMEM);
+
+               priv->wdev = wdev;
+               wdev->wiphy = wiphy;
+
+               /* At start-up, wpa_supplicant tries to change the interface
+                * to NL80211_IFTYPE_STATION if it is not managed mode.
+                * So, we initialize it to STA mode.
+                */
+               wdev->iftype = NL80211_IFTYPE_STATION;
+               priv->bss_mode = NL80211_IFTYPE_STATION;
+
+               /* Setting bss_type to P2P tells firmware that this interface
+                * is receiving P2P peers found during find phase and doing
+                * action frame handshake.
+                */
+               priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+
+               priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+               priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+               priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+               priv->bss_started = 0;
+               priv->bss_num = 0;
+
                break;
        default:
                wiphy_err(wiphy, "type not supported\n");
@@ -1769,6 +2158,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .leave_ibss = mwifiex_cfg80211_leave_ibss,
        .add_key = mwifiex_cfg80211_add_key,
        .del_key = mwifiex_cfg80211_del_key,
+       .mgmt_tx = mwifiex_cfg80211_mgmt_tx,
+       .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
+       .remain_on_channel = mwifiex_cfg80211_remain_on_channel,
+       .cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel,
        .set_default_key = mwifiex_cfg80211_set_default_key,
        .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
        .set_tx_power = mwifiex_cfg80211_set_tx_power,
@@ -1805,8 +2198,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        }
        wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
        wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+       wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+       wiphy->max_remain_on_channel_duration = 5000;
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_ADHOC) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                BIT(NL80211_IFTYPE_P2P_GO) |
                                 BIT(NL80211_IFTYPE_AP);
 
        wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
@@ -1825,15 +2222,21 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
-                       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+                       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+                       WIPHY_FLAG_CUSTOM_REGULATORY |
+                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+       wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
 
        wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
-                                   NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
+                                   NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+                                   NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
        wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
        wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
 
-       wiphy->features = NL80211_FEATURE_HT_IBSS;
+       wiphy->features = NL80211_FEATURE_HT_IBSS |
+                         NL80211_FEATURE_INACTIVITY_TIMER;
 
        /* Reserve space for mwifiex specific private data for BSS */
        wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1854,8 +2257,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                return ret;
        }
        country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
-       if (country_code && regulatory_hint(wiphy, country_code))
-               dev_err(adapter->dev, "regulatory_hint() failed\n");
+       if (country_code)
+               dev_info(adapter->dev,
+                        "ignoring F/W country code %2.2s\n", country_code);
 
        adapter->wiphy = wiphy;
        return ret;
index 565527aee0ea3f73caa832f336c0ded06a3b22d9..8d465107f52b2c5073acad20d8d0acbe0485be5e 100644 (file)
@@ -460,7 +460,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
                        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
        }
 
-       ret = mwifiex_process_sta_event(priv);
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               ret = mwifiex_process_uap_event(priv);
+       else
+               ret = mwifiex_process_sta_event(priv);
 
        adapter->event_cause = 0;
        adapter->event_skb = NULL;
@@ -1085,6 +1088,8 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
        if (activated) {
                if (priv->adapter->is_hs_configured) {
                        priv->adapter->hs_activated = true;
+                       mwifiex_update_rxreor_flags(priv->adapter,
+                                                   RXREOR_FORCE_NO_DROP);
                        dev_dbg(priv->adapter->dev, "event: hs_activated\n");
                        priv->adapter->hs_activate_wait_q_woken = true;
                        wake_up_interruptible(
index 070ef25f51867a1bb0d3baf3279d0eb02b2ba3d6..e9357d87d3279f7ba1d19c8246347f44ca46a269 100644 (file)
 #include <linux/ieee80211.h>
 
 
-#define MWIFIEX_MAX_BSS_NUM         (2)
+#define MWIFIEX_MAX_BSS_NUM         (3)
 
 #define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
                                         *   + 4 byte alignment
                                         */
+#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8       /* sizeof(pkt_type)
+                                                *   + sizeof(tx_control)
+                                                */
 
 #define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED      2
 #define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED      16
 #define MWIFIEX_SDIO_BLOCK_SIZE            256
 
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT      BIT(0)
+#define MWIFIEX_BUF_FLAG_BRIDGED_PKT      BIT(1)
+
+#define MWIFIEX_BRIDGED_PKTS_THRESHOLD     1024
 
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
+       MWIFIEX_BSS_TYPE_P2P = 2,
        MWIFIEX_BSS_TYPE_ANY = 0xff,
 };
 
index e831b440a24a3f2c654e8ad04e17701fb0e1b85f..dda588b3557063e95bfc92746422e3d3942a1719 100644 (file)
@@ -65,10 +65,12 @@ enum KEY_TYPE_ID {
        KEY_TYPE_ID_TKIP,
        KEY_TYPE_ID_AES,
        KEY_TYPE_ID_WAPI,
+       KEY_TYPE_ID_AES_CMAC,
 };
 #define KEY_MCAST      BIT(0)
 #define KEY_UNICAST    BIT(1)
 #define KEY_ENABLED    BIT(2)
+#define KEY_IGTK       BIT(10)
 
 #define WAPI_KEY_LEN                   50
 
@@ -92,6 +94,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 };
 
 #define CAL_SNR(RSSI, NF)              ((s16)((s16)(RSSI)-(s16)(NF)))
+#define CAL_RSSI(SNR, NF)              ((s16)((s16)(SNR)+(s16)(NF)))
 
 #define UAP_BSS_PARAMS_I                       0
 #define UAP_CUSTOM_IE_I                                1
@@ -106,6 +109,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define MGMT_MASK_BEACON                       0x100
 
 #define TLV_TYPE_UAP_SSID                      0x0000
+#define TLV_TYPE_UAP_RATES                     0x0001
 
 #define PROPRIETARY_TLV_BASE_ID                 0x0100
 #define TLV_TYPE_KEY_MATERIAL       (PROPRIETARY_TLV_BASE_ID + 0)
@@ -124,6 +128,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
 #define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
 #define TLV_TYPE_UAP_RTS_THRESHOLD  (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_AO_TIMER       (PROPRIETARY_TLV_BASE_ID + 57)
 #define TLV_TYPE_UAP_WEP_KEY        (PROPRIETARY_TLV_BASE_ID + 59)
 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -138,6 +143,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_MGMT_IE            (PROPRIETARY_TLV_BASE_ID + 105)
 #define TLV_TYPE_AUTO_DS_PARAM      (PROPRIETARY_TLV_BASE_ID + 113)
 #define TLV_TYPE_PS_PARAM           (PROPRIETARY_TLV_BASE_ID + 114)
+#define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
 
@@ -257,9 +263,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_TX_RATE_CFG                       0x00d6
 #define HostCmd_CMD_802_11_PS_MODE_ENH                0x00e4
 #define HostCmd_CMD_802_11_HS_CFG_ENH                 0x00e5
+#define HostCmd_CMD_P2P_MODE_CFG                      0x00eb
 #define HostCmd_CMD_CAU_REG_ACCESS                    0x00ed
 #define HostCmd_CMD_SET_BSS_MODE                      0x00f7
 #define HostCmd_CMD_PCIE_DESC_DETAILS                 0x00fa
+#define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
+#define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 
 #define PROTOCOL_NO_SECURITY        0x01
 #define PROTOCOL_STATIC_WEP         0x02
@@ -285,9 +294,17 @@ enum ENH_PS_MODES {
        DIS_AUTO_PS = 0xfe,
 };
 
+enum P2P_MODES {
+       P2P_MODE_DISABLE = 0,
+       P2P_MODE_DEVICE = 1,
+       P2P_MODE_GO = 2,
+       P2P_MODE_CLIENT = 3,
+};
+
 #define HostCmd_RET_BIT                       0x8000
 #define HostCmd_ACT_GEN_GET                   0x0000
 #define HostCmd_ACT_GEN_SET                   0x0001
+#define HostCmd_ACT_GEN_REMOVE                0x0004
 #define HostCmd_ACT_BITWISE_SET               0x0002
 #define HostCmd_ACT_BITWISE_CLR               0x0003
 #define HostCmd_RESULT_OK                     0x0000
@@ -307,7 +324,7 @@ enum ENH_PS_MODES {
 #define HostCmd_SCAN_RADIO_TYPE_A           1
 
 #define HOST_SLEEP_CFG_CANCEL          0xffffffff
-#define HOST_SLEEP_CFG_COND_DEF                0x0000000f
+#define HOST_SLEEP_CFG_COND_DEF                0x00000000
 #define HOST_SLEEP_CFG_GPIO_DEF                0xff
 #define HOST_SLEEP_CFG_GAP_DEF         0
 
@@ -385,6 +402,7 @@ enum ENH_PS_MODES {
 #define EVENT_BW_CHANGE                 0x00000048
 #define EVENT_UAP_MIC_COUNTERMEASURES   0x0000004c
 #define EVENT_HOSTWAKE_STAIE           0x0000004d
+#define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 
 #define EVENT_ID_MASK                   0xffff
 #define BSS_NUM_MASK                    0xf
@@ -424,10 +442,10 @@ struct txpd {
 struct rxpd {
        u8 bss_type;
        u8 bss_num;
-       u16 rx_pkt_length;
-       u16 rx_pkt_offset;
-       u16 rx_pkt_type;
-       u16 seq_num;
+       __le16 rx_pkt_length;
+       __le16 rx_pkt_offset;
+       __le16 rx_pkt_type;
+       __le16 seq_num;
        u8 priority;
        u8 rx_rate;
        s8 snr;
@@ -439,6 +457,31 @@ struct rxpd {
        u8 reserved;
 } __packed;
 
+struct uap_txpd {
+       u8 bss_type;
+       u8 bss_num;
+       __le16 tx_pkt_length;
+       __le16 tx_pkt_offset;
+       __le16 tx_pkt_type;
+       __le32 tx_control;
+       u8 priority;
+       u8 flags;
+       u8 pkt_delay_2ms;
+       u8 reserved1;
+       __le32 reserved2;
+};
+
+struct uap_rxpd {
+       u8 bss_type;
+       u8 bss_num;
+       __le16 rx_pkt_length;
+       __le16 rx_pkt_offset;
+       __le16 rx_pkt_type;
+       __le16 seq_num;
+       u8 priority;
+       u8 reserved1;
+};
+
 enum mwifiex_chan_scan_mode_bitmasks {
        MWIFIEX_PASSIVE_SCAN = BIT(0),
        MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +601,13 @@ struct mwifiex_ie_type_key_param_set {
        u8 key[50];
 } __packed;
 
+#define IGTK_PN_LEN            8
+
+struct mwifiex_cmac_param {
+       u8 ipn[IGTK_PN_LEN];
+       u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
 struct host_cmd_ds_802_11_key_material {
        __le16 action;
        struct mwifiex_ie_type_key_param_set key_param_set;
@@ -1250,6 +1300,11 @@ struct host_cmd_tlv_ssid {
        u8 ssid[0];
 } __packed;
 
+struct host_cmd_tlv_rates {
+       struct host_cmd_tlv tlv;
+       u8 rates[0];
+} __packed;
+
 struct host_cmd_tlv_bcast_ssid {
        struct host_cmd_tlv tlv;
        u8 bcast_ctl;
@@ -1291,11 +1346,35 @@ struct host_cmd_tlv_channel_band {
        u8 channel;
 } __packed;
 
+struct host_cmd_tlv_ageout_timer {
+       struct host_cmd_tlv tlv;
+       __le32 sta_ao_timer;
+} __packed;
+
 struct host_cmd_ds_version_ext {
        u8 version_str_sel;
        char version_str[128];
 } __packed;
 
+struct host_cmd_ds_mgmt_frame_reg {
+       __le16 action;
+       __le32 mask;
+} __packed;
+
+struct host_cmd_ds_p2p_mode_cfg {
+       __le16 action;
+       __le16 mode;
+} __packed;
+
+struct host_cmd_ds_remain_on_chan {
+       __le16 action;
+       u8 status;
+       u8 reserved;
+       u8 band_cfg;
+       u8 channel;
+       __le32 duration;
+} __packed;
+
 struct host_cmd_ds_802_11_ibss_status {
        __le16 action;
        __le16 enable;
@@ -1307,6 +1386,7 @@ struct host_cmd_ds_802_11_ibss_status {
 
 #define CONNECTION_TYPE_INFRA   0
 #define CONNECTION_TYPE_ADHOC   1
+#define CONNECTION_TYPE_AP      2
 
 struct host_cmd_ds_set_bss_mode {
        u8 con_type;
@@ -1404,6 +1484,9 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_wmm_get_status get_wmm_status;
                struct host_cmd_ds_802_11_key_material key_material;
                struct host_cmd_ds_version_ext verext;
+               struct host_cmd_ds_mgmt_frame_reg reg_mask;
+               struct host_cmd_ds_remain_on_chan roc_cfg;
+               struct host_cmd_ds_p2p_mode_cfg mode_cfg;
                struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
                struct host_cmd_ds_mac_reg_access mac_reg;
                struct host_cmd_ds_bbp_reg_access bbp_reg;
index 1d8dd003e39617124ff87d92d2d3437391b7ba84..e38342f86c515e6e574fbae572e7c51dbe7c8fa8 100644 (file)
@@ -114,9 +114,6 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
                                                        cpu_to_le16(mask);
 
                        ie->ie_index = cpu_to_le16(index);
-                       ie->ie_length = priv->mgmt_ie[index].ie_length;
-                       memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
-                              le16_to_cpu(priv->mgmt_ie[index].ie_length));
                } else {
                        if (mask != MWIFIEX_DELETE_MASK)
                                return -1;
@@ -160,7 +157,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
        u16 len;
        int ret;
 
-       ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+       ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
        if (!ap_custom_ie)
                return -ENOMEM;
 
@@ -214,30 +211,35 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
        return ret;
 }
 
-/* This function checks if WPS IE is present in passed buffer and copies it to
- * mwifiex_ie structure.
+/* This function checks if the vendor specified IE is present in passed buffer
+ * and copies it to mwifiex_ie structure.
  * Function takes pointer to struct mwifiex_ie pointer as argument.
- * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled
- * in with WPS IE. Caller should take care of freeing this memory.
+ * If the vendor specified IE is present then memory is allocated for
+ * mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
+ * this memory.
  */
-static int mwifiex_update_wps_ie(const u8 *ies, int ies_len,
-                                struct mwifiex_ie **ie_ptr, u16 mask)
+static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
+                               struct mwifiex_ie **ie_ptr, u16 mask,
+                               unsigned int oui, u8 oui_type)
 {
-       struct ieee_types_header *wps_ie;
-       struct mwifiex_ie *ie = NULL;
+       struct ieee_types_header *vs_ie;
+       struct mwifiex_ie *ie = *ie_ptr;
        const u8 *vendor_ie;
 
-       vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
-                                           WLAN_OUI_TYPE_MICROSOFT_WPS,
-                                           ies, ies_len);
+       vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
        if (vendor_ie) {
-               ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
-               if (!ie)
-                       return -ENOMEM;
+               if (!*ie_ptr) {
+                       *ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
+                                         GFP_KERNEL);
+                       if (!*ie_ptr)
+                               return -ENOMEM;
+                       ie = *ie_ptr;
+               }
 
-               wps_ie = (struct ieee_types_header *)vendor_ie;
-               memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2);
-               ie->ie_length = cpu_to_le16(wps_ie->len + 2);
+               vs_ie = (struct ieee_types_header *)vendor_ie;
+               memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
+                      vs_ie, vs_ie->len + 2);
+               le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
                ie->mgmt_subtype_mask = cpu_to_le16(mask);
                ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
        }
@@ -257,20 +259,40 @@ static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
        u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
        int ret = 0;
 
-       if (data->beacon_ies && data->beacon_ies_len)
-               mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len,
-                                     &beacon_ie, MGMT_MASK_BEACON);
+       if (data->beacon_ies && data->beacon_ies_len) {
+               mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
+                                    &beacon_ie, MGMT_MASK_BEACON,
+                                    WLAN_OUI_MICROSOFT,
+                                    WLAN_OUI_TYPE_MICROSOFT_WPS);
+               mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
+                                    &beacon_ie, MGMT_MASK_BEACON,
+                                    WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
+       }
 
-       if (data->proberesp_ies && data->proberesp_ies_len)
-               mwifiex_update_wps_ie(data->proberesp_ies,
-                                     data->proberesp_ies_len, &pr_ie,
-                                     MGMT_MASK_PROBE_RESP);
+       if (data->proberesp_ies && data->proberesp_ies_len) {
+               mwifiex_update_vs_ie(data->proberesp_ies,
+                                    data->proberesp_ies_len, &pr_ie,
+                                    MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
+                                    WLAN_OUI_TYPE_MICROSOFT_WPS);
+               mwifiex_update_vs_ie(data->proberesp_ies,
+                                    data->proberesp_ies_len, &pr_ie,
+                                    MGMT_MASK_PROBE_RESP,
+                                    WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
+       }
 
-       if (data->assocresp_ies && data->assocresp_ies_len)
-               mwifiex_update_wps_ie(data->assocresp_ies,
-                                     data->assocresp_ies_len, &ar_ie,
-                                     MGMT_MASK_ASSOC_RESP |
-                                     MGMT_MASK_REASSOC_RESP);
+       if (data->assocresp_ies && data->assocresp_ies_len) {
+               mwifiex_update_vs_ie(data->assocresp_ies,
+                                    data->assocresp_ies_len, &ar_ie,
+                                    MGMT_MASK_ASSOC_RESP |
+                                    MGMT_MASK_REASSOC_RESP,
+                                    WLAN_OUI_MICROSOFT,
+                                    WLAN_OUI_TYPE_MICROSOFT_WPS);
+               mwifiex_update_vs_ie(data->assocresp_ies,
+                                    data->assocresp_ies_len, &ar_ie,
+                                    MGMT_MASK_ASSOC_RESP |
+                                    MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
+                                    WLAN_OUI_TYPE_WFA_P2P);
+       }
 
        if (beacon_ie || pr_ie || ar_ie) {
                ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
index 21fdc6c02775b4f9b119afbab1de01420062c28f..b5d37a8caa09a4429504fd804d8a6a5b1542b1af 100644 (file)
@@ -64,60 +64,77 @@ static void scan_delay_timer_fn(unsigned long data)
        struct cmd_ctrl_node *cmd_node, *tmp_node;
        unsigned long flags;
 
-       if (!mwifiex_wmm_lists_empty(adapter)) {
-               if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+               /*
+                * Abort scan operation by cancelling all pending scan
+                * commands
+                */
+               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+               list_for_each_entry_safe(cmd_node, tmp_node,
+                                        &adapter->scan_pending_q, list) {
+                       list_del(&cmd_node->list);
+                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               }
+               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+               adapter->scan_processing = false;
+               adapter->scan_delay_cnt = 0;
+               adapter->empty_tx_q_cnt = 0;
+               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+               if (priv->user_scan_cfg) {
+                       dev_dbg(priv->adapter->dev,
+                               "info: %s: scan aborted\n", __func__);
+                       cfg80211_scan_done(priv->scan_request, 1);
+                       priv->scan_request = NULL;
+                       kfree(priv->user_scan_cfg);
+                       priv->user_scan_cfg = NULL;
+               }
+
+               if (priv->scan_pending_on_block) {
+                       priv->scan_pending_on_block = false;
+                       up(&priv->async_sem);
+               }
+               goto done;
+       }
+
+       if (!atomic_read(&priv->adapter->is_tx_received)) {
+               adapter->empty_tx_q_cnt++;
+               if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
                        /*
-                        * Abort scan operation by cancelling all pending scan
-                        * command
+                        * No Tx traffic for 200msec. Get scan command from
+                        * scan pending queue and put to cmd pending queue to
+                        * resume scan operation
                         */
+                       adapter->scan_delay_cnt = 0;
+                       adapter->empty_tx_q_cnt = 0;
                        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-                       list_for_each_entry_safe(cmd_node, tmp_node,
-                                                &adapter->scan_pending_q,
-                                                list) {
-                               list_del(&cmd_node->list);
-                               cmd_node->wait_q_enabled = false;
-                               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-                       }
+                       cmd_node = list_first_entry(&adapter->scan_pending_q,
+                                                   struct cmd_ctrl_node, list);
+                       list_del(&cmd_node->list);
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
 
-                       spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-                       adapter->scan_processing = false;
-                       spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
-                                              flags);
-
-                       if (priv->user_scan_cfg) {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: %s: scan aborted\n", __func__);
-                               cfg80211_scan_done(priv->scan_request, 1);
-                               priv->scan_request = NULL;
-                               kfree(priv->user_scan_cfg);
-                               priv->user_scan_cfg = NULL;
-                       }
-               } else {
-                       /*
-                        * Tx data queue is still not empty, delay scan
-                        * operation further by 20msec.
-                        */
-                       mod_timer(&priv->scan_delay_timer, jiffies +
-                                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
-                       adapter->scan_delay_cnt++;
+                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+                                                       true);
+                       queue_work(adapter->workqueue, &adapter->main_work);
+                       goto done;
                }
-               queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
        } else {
-               /*
-                * Tx data queue is empty. Get scan command from scan_pending_q
-                * and put to cmd_pending_q to resume scan operation
-                */
-               adapter->scan_delay_cnt = 0;
-               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-               cmd_node = list_first_entry(&adapter->scan_pending_q,
-                                           struct cmd_ctrl_node, list);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
-               mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+               adapter->empty_tx_q_cnt = 0;
        }
+
+       /* Delay scan operation further by 20msec */
+       mod_timer(&priv->scan_delay_timer, jiffies +
+                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+       adapter->scan_delay_cnt++;
+
+done:
+       if (atomic_read(&priv->adapter->is_tx_received))
+               atomic_set(&priv->adapter->is_tx_received, false);
+
+       return;
 }
 
 /*
@@ -127,7 +144,7 @@ static void scan_delay_timer_fn(unsigned long data)
  * Additionally, it also initializes all the locks and sets up all the
  * lists.
  */
-static int mwifiex_init_priv(struct mwifiex_private *priv)
+int mwifiex_init_priv(struct mwifiex_private *priv)
 {
        u32 i;
 
@@ -196,6 +213,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
        priv->curr_bcn_size = 0;
        priv->wps_ie = NULL;
        priv->wps_ie_len = 0;
+       priv->ap_11n_enabled = 0;
+       memset(&priv->roc_cfg, 0, sizeof(priv->roc_cfg));
 
        priv->scan_block = false;
 
@@ -345,6 +364,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+       adapter->empty_tx_q_cnt = 0;
 }
 
 /*
@@ -410,6 +430,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
                                list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
                        list_del(&priv->tx_ba_stream_tbl_ptr);
                        list_del(&priv->rx_reorder_tbl_ptr);
+                       list_del(&priv->sta_list);
                }
        }
 }
@@ -472,6 +493,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                        spin_lock_init(&priv->rx_pkt_lock);
                        spin_lock_init(&priv->wmm.ra_list_spinlock);
                        spin_lock_init(&priv->curr_bcn_buf_lock);
+                       spin_lock_init(&priv->sta_list_spinlock);
                }
        }
 
@@ -504,6 +526,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                }
                INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
                INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+               INIT_LIST_HEAD(&priv->sta_list);
 
                spin_lock_init(&priv->tx_ba_stream_tbl_lock);
                spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -625,6 +648,17 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
        }
 }
 
+/*
+ * This function frees the private structure, including cleans
+ * up the TX and RX queues and frees the BSS priority tables.
+ */
+void mwifiex_free_priv(struct mwifiex_private *priv)
+{
+       mwifiex_clean_txrx(priv);
+       mwifiex_delete_bss_prio_tbl(priv);
+       mwifiex_free_curr_bcn(priv);
+}
+
 /*
  * This function is used to shutdown the driver.
  *
index 50191539bb322ed206bc46f11cfa6154cbda2ad4..4e31c6013ebe5d73db79e427ee3296c5daa940c5 100644 (file)
@@ -81,7 +81,11 @@ struct wep_key {
 
 #define KEY_MGMT_ON_HOST        0x03
 #define MWIFIEX_AUTH_MODE_AUTO  0xFF
-#define BAND_CONFIG_MANUAL      0x00
+#define BAND_CONFIG_BG          0x00
+#define BAND_CONFIG_A           0x01
+#define MWIFIEX_SUPPORTED_RATES                 14
+#define MWIFIEX_SUPPORTED_RATES_EXT             32
+
 struct mwifiex_uap_bss_param {
        u8 channel;
        u8 band_cfg;
@@ -100,6 +104,9 @@ struct mwifiex_uap_bss_param {
        struct wpa_param wpa_cfg;
        struct wep_key wep_cfg[NUM_WEP_KEYS];
        struct ieee80211_ht_cap ht_cap;
+       u8 rates[MWIFIEX_SUPPORTED_RATES];
+       u32 sta_ao_timer;
+       u32 ps_sta_ao_timer;
 };
 
 enum {
@@ -213,7 +220,7 @@ struct mwifiex_debug_info {
 };
 
 #define MWIFIEX_KEY_INDEX_UNICAST      0x40000000
-#define WAPI_RXPN_LEN                  16
+#define PN_LEN                         16
 
 struct mwifiex_ds_encrypt_key {
        u32 key_disable;
@@ -222,7 +229,8 @@ struct mwifiex_ds_encrypt_key {
        u8 key_material[WLAN_MAX_KEY_LEN];
        u8 mac_addr[ETH_ALEN];
        u32 is_wapi_key;
-       u8 wapi_rxpn[WAPI_RXPN_LEN];
+       u8 pn[PN_LEN];          /* packet number */
+       u8 is_igtk_key;
 };
 
 struct mwifiex_power_cfg {
index 46803621d01511dad87b91b59ef2b6fb8a38a9ef..eb22dd248d5491e0644a7b60225216faeae7ad1a 100644 (file)
@@ -72,7 +72,6 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
                        goto error;
 
                adapter->priv[i]->adapter = adapter;
-               adapter->priv[i]->bss_priority = i;
                adapter->priv_num++;
        }
        mwifiex_init_lock_list(adapter);
@@ -370,6 +369,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
                dev_err(adapter->dev, "cannot create default AP interface\n");
                goto err_add_intf;
        }
+
+       /* Create P2P interface by default */
+       if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+                                     NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
+               dev_err(adapter->dev, "cannot create default P2P interface\n");
+               goto err_add_intf;
+       }
        rtnl_unlock();
 
        mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -469,6 +475,27 @@ mwifiex_close(struct net_device *dev)
        return 0;
 }
 
+/*
+ * Add buffer into wmm tx queue and queue work to transmit it.
+ */
+int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       atomic_inc(&priv->adapter->tx_pending);
+
+       if (priv->adapter->scan_delay_cnt)
+               atomic_set(&priv->adapter->is_tx_received, true);
+
+       if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
+               mwifiex_set_trans_start(priv->netdev);
+               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+       }
+
+       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+
+       return 0;
+}
+
 /*
  * CFG802.11 network device handler for data transmission.
  */
@@ -517,15 +544,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_info->bss_type = priv->bss_type;
        mwifiex_fill_buffer(skb);
 
-       mwifiex_wmm_add_buf_txqueue(priv, skb);
-       atomic_inc(&priv->adapter->tx_pending);
-
-       if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
-               mwifiex_set_trans_start(dev);
-               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
-       }
-
-       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+       mwifiex_queue_tx_pkt(priv, skb);
 
        return 0;
 }
index e7c2a82fd6106481d63d7beb9df0388d1a0798c2..bfb3fa69805c8d34d5d25586ec014c7d71728c9a 100644 (file)
@@ -88,13 +88,18 @@ enum {
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME    (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
 #define MWIFIEX_MAX_SCAN_DELAY_CNT                     50
+#define MWIFIEX_MAX_EMPTY_TX_Q_CNT                     10
 #define MWIFIEX_SCAN_DELAY_MSEC                                20
 
+#define MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN          2
+
 #define RSN_GTK_OUI_OFFSET                             2
 
 #define MWIFIEX_OUI_NOT_PRESENT                        0
 #define MWIFIEX_OUI_PRESENT                            1
 
+#define PKT_TYPE_MGMT  0xE5
+
 /*
  * Do not check for data_received for USB, as data_received
  * is handled in mwifiex_usb_recv for USB
@@ -115,6 +120,7 @@ enum {
 #define MAX_BITMAP_RATES_SIZE                  10
 
 #define MAX_CHANNEL_BAND_BG     14
+#define MAX_CHANNEL_BAND_A      165
 
 #define MAX_FREQUENCY_BAND_BG   2484
 
@@ -199,6 +205,9 @@ struct mwifiex_ra_list_tbl {
        u8 ra[ETH_ALEN];
        u32 total_pkts_size;
        u32 is_11n_enabled;
+       u16 max_amsdu;
+       u16 pkt_count;
+       u8 ba_packet_thr;
 };
 
 struct mwifiex_tid_tbl {
@@ -245,10 +254,6 @@ struct ieee_types_header {
        u8 len;
 } __packed;
 
-#define MWIFIEX_SUPPORTED_RATES                 14
-
-#define MWIFIEX_SUPPORTED_RATES_EXT             32
-
 struct ieee_types_vendor_specific {
        struct ieee_types_vendor_header vend_hdr;
        u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
@@ -365,6 +370,12 @@ struct wps {
        u8 session_enable;
 };
 
+struct mwifiex_roc_cfg {
+       u64 cookie;
+       struct ieee80211_channel chan;
+       enum nl80211_channel_type chan_type;
+};
+
 struct mwifiex_adapter;
 struct mwifiex_private;
 
@@ -431,6 +442,9 @@ struct mwifiex_private {
        u8 wmm_enabled;
        u8 wmm_qosinfo;
        struct mwifiex_wmm_desc wmm;
+       struct list_head sta_list;
+       /* spin lock for associated station list */
+       spinlock_t sta_list_spinlock;
        struct list_head tx_ba_stream_tbl_ptr;
        /* spin lock for tx_ba_stream_tbl_ptr queue */
        spinlock_t tx_ba_stream_tbl_lock;
@@ -480,12 +494,16 @@ struct mwifiex_private {
        s32 cqm_rssi_thold;
        u32 cqm_rssi_hyst;
        u8 subsc_evt_rssi_state;
+       struct mwifiex_ds_misc_subsc_evt async_subsc_evt_storage;
        struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
        u16 beacon_idx;
        u16 proberesp_idx;
        u16 assocresp_idx;
        u16 rsn_idx;
        struct timer_list scan_delay_timer;
+       u8 ap_11n_enabled;
+       u32 mgmt_frame_mask;
+       struct mwifiex_roc_cfg roc_cfg;
 };
 
 enum mwifiex_ba_status {
@@ -517,6 +535,7 @@ struct mwifiex_rx_reorder_tbl {
        int win_size;
        void **rx_reorder_ptr;
        struct reorder_tmr_cnxt timer_context;
+       u8 flags;
 };
 
 struct mwifiex_bss_prio_node {
@@ -550,6 +569,19 @@ struct mwifiex_bss_priv {
        u64 fw_tsf;
 };
 
+/* This is AP specific structure which stores information
+ * about associated STA
+ */
+struct mwifiex_sta_node {
+       struct list_head list;
+       u8 mac_addr[ETH_ALEN];
+       u8 is_wmm_enabled;
+       u8 is_11n_enabled;
+       u8 ampdu_sta[MAX_NUM_TID];
+       u16 rx_seq[MAX_NUM_TID];
+       u16 max_amsdu;
+};
+
 struct mwifiex_if_ops {
        int (*init_if) (struct mwifiex_adapter *);
        void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +722,9 @@ struct mwifiex_adapter {
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        u16 max_mgmt_ie_index;
        u8 scan_delay_cnt;
+       u8 empty_tx_q_cnt;
+       atomic_t is_tx_received;
+       atomic_t pending_bridged_pkts;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -702,6 +737,9 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
 void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
                struct mwifiex_adapter *adapter);
 
+int mwifiex_init_priv(struct mwifiex_private *priv);
+void mwifiex_free_priv(struct mwifiex_private *priv);
+
 int mwifiex_init_fw(struct mwifiex_adapter *adapter);
 
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -714,6 +752,9 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
 
 int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
 
+int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+                               struct sk_buff *skb);
+
 int mwifiex_process_event(struct mwifiex_adapter *adapter);
 
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
@@ -780,8 +821,17 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
                                struct host_cmd_ds_command *resp);
 int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
                                  struct sk_buff *skb);
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+                                 struct sk_buff *skb);
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+                                 struct sk_buff *skb);
 int mwifiex_process_sta_event(struct mwifiex_private *);
+int mwifiex_process_uap_event(struct mwifiex_private *);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
 void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
+void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
 int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
 int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
                            struct mwifiex_scan_cmd_config *scan_cfg);
@@ -840,6 +890,8 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
 void mwifiex_set_ht_params(struct mwifiex_private *priv,
                           struct mwifiex_uap_bss_param *bss_cfg,
                           struct cfg80211_ap_settings *params);
+void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+                          struct cfg80211_ap_settings *params);
 
 /*
  * This function checks if the queuing is RA based or not.
@@ -925,6 +977,14 @@ mwifiex_netdev_get_priv(struct net_device *dev)
        return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
 }
 
+/*
+ * This function checks if a skb holds a management frame.
+ */
+static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
+{
+       return (*(u32 *)skb->data == PKT_TYPE_MGMT);
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
                             u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -949,14 +1009,21 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
                          const struct mwifiex_user_scan_cfg *user_scan_in);
 int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
 
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-                      int key_len, u8 key_index, const u8 *mac_addr,
-                      int disable);
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+                      const u8 *key, int key_len, u8 key_index,
+                      const u8 *mac_addr, int disable);
 
 int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
 
 int mwifiex_get_ver_ext(struct mwifiex_private *priv);
 
+int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
+                              struct ieee80211_channel *chan,
+                              enum nl80211_channel_type *channel_type,
+                              unsigned int duration);
+
+int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
+
 int mwifiex_get_stats_info(struct mwifiex_private *priv,
                           struct mwifiex_ds_get_stats *log);
 
@@ -987,6 +1054,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 
 int mwifiex_main_process(struct mwifiex_adapter *);
 
+int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb);
+
 int mwifiex_get_bss_info(struct mwifiex_private *,
                         struct mwifiex_bss_info *);
 int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -997,8 +1066,10 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
 int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
                                        struct mwifiex_bssdescriptor *bss_desc);
 
+u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
+
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
-                                             char *name,
+                                             const char *name,
                                              enum nl80211_iftype type,
                                              u32 *flags,
                                              struct vif_params *params);
index 04dc7ca4ac221a3b2c54d644f2b04fed8c180852..e36a75988f877600978c083595c4844d951860c5 100644 (file)
@@ -614,9 +614,8 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
 
                        /* Increment the TLV header length by the size
                           appended */
-                       chan_tlv_out->header.len =
-                       cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) +
-                       (sizeof(chan_tlv_out->chan_scan_param)));
+                       le16_add_cpu(&chan_tlv_out->header.len,
+                                    sizeof(chan_tlv_out->chan_scan_param));
 
                        /*
                         * The tlv buffer length is set to the number of bytes
@@ -726,7 +725,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        struct mwifiex_ie_types_num_probes *num_probes_tlv;
        struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
        struct mwifiex_ie_types_rates_param_set *rates_tlv;
-       const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
        u8 *tlv_pos;
        u32 num_probes;
        u32 ssid_len;
@@ -840,8 +838,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                 *  or BSSID filter applied to the scan results in the firmware.
                 */
                if ((i && ssid_filter) ||
-                   memcmp(scan_cfg_out->specific_bssid, &zero_mac,
-                          sizeof(zero_mac)))
+                   !is_zero_ether_addr(scan_cfg_out->specific_bssid))
                        *filtered_scan = true;
        } else {
                scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
@@ -989,6 +986,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        *max_chan_per_scan = 2;
                else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
                        *max_chan_per_scan = 3;
+               else
+                       *max_chan_per_scan = 4;
        }
 }
 
@@ -1433,9 +1432,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
                        if (ret)
                                dev_err(priv->adapter->dev, "cannot find ssid "
                                        "%s\n", bss_desc->ssid.ssid);
-                               break;
+                       break;
                default:
-                               ret = 0;
+                       ret = 0;
                }
        }
 
index df3a33c530cf1a30f9a4058b2bb01d3b24777d0a..5d87195390f863a9492aadaa9ea1d51d6ba8ae8e 100644 (file)
@@ -551,7 +551,6 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
        struct host_cmd_tlv_mac_addr *tlv_mac;
        u16 key_param_len = 0, cmd_size;
        int ret = 0;
-       const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
        cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
        key_material->action = cpu_to_le16(cmd_action);
@@ -593,7 +592,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                        /* set 0 when re-key */
                        key_material->key_param_set.key[1] = 0;
 
-               if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) {
+               if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
                        /* WAPI pairwise key: unicast */
                        key_material->key_param_set.key_info |=
                                cpu_to_le16(KEY_UNICAST);
@@ -610,7 +609,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                memcpy(&key_material->key_param_set.key[2],
                       enc_key->key_material, enc_key->key_len);
                memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
-                      enc_key->wapi_rxpn, WAPI_RXPN_LEN);
+                      enc_key->pn, PN_LEN);
                key_material->key_param_set.length =
                        cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
 
@@ -621,23 +620,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                return ret;
        }
        if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
-               dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
-               key_material->key_param_set.key_type_id =
+               if (enc_key->is_igtk_key) {
+                       dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+                       key_material->key_param_set.key_type_id =
+                                       cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
+                       if (cmd_oid == KEY_INFO_ENABLED)
+                               key_material->key_param_set.key_info =
+                                               cpu_to_le16(KEY_ENABLED);
+                       else
+                               key_material->key_param_set.key_info =
+                                               cpu_to_le16(!KEY_ENABLED);
+
+                       key_material->key_param_set.key_info |=
+                                                       cpu_to_le16(KEY_IGTK);
+               } else {
+                       dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+                       key_material->key_param_set.key_type_id =
                                                cpu_to_le16(KEY_TYPE_ID_AES);
-               if (cmd_oid == KEY_INFO_ENABLED)
-                       key_material->key_param_set.key_info =
+                       if (cmd_oid == KEY_INFO_ENABLED)
+                               key_material->key_param_set.key_info =
                                                cpu_to_le16(KEY_ENABLED);
-               else
-                       key_material->key_param_set.key_info =
+                       else
+                               key_material->key_param_set.key_info =
                                                cpu_to_le16(!KEY_ENABLED);
 
-               if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+                       if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
                                /* AES pairwise key: unicast */
-                       key_material->key_param_set.key_info |=
+                               key_material->key_param_set.key_info |=
                                                cpu_to_le16(KEY_UNICAST);
-               else            /* AES group key: multicast */
-                       key_material->key_param_set.key_info |=
+                       else    /* AES group key: multicast */
+                               key_material->key_param_set.key_info |=
                                                        cpu_to_le16(KEY_MCAST);
+               }
        } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
                dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
                key_material->key_param_set.key_type_id =
@@ -668,6 +682,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
                                + sizeof(struct mwifiex_ie_types_header);
 
+               if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
+                                                       KEY_TYPE_ID_AES_CMAC) {
+                       struct mwifiex_cmac_param *param =
+                                       (void *)key_material->key_param_set.key;
+
+                       memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
+                       memcpy(param->key, enc_key->key_material,
+                              WLAN_KEY_LEN_AES_CMAC);
+
+                       key_param_len = sizeof(struct mwifiex_cmac_param);
+                       key_material->key_param_set.key_len =
+                                               cpu_to_le16(key_param_len);
+                       key_param_len += KEYPARAMSET_FIXED_LEN;
+                       key_material->key_param_set.length =
+                                               cpu_to_le16(key_param_len);
+                       key_param_len += sizeof(struct mwifiex_ie_types_header);
+               }
+
                cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
                                        + key_param_len);
 
@@ -1135,6 +1167,31 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                                    S_DS_GEN);
                ret = 0;
                break;
+       case HostCmd_CMD_MGMT_FRAME_REG:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
+               cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
+               cmd_ptr->size =
+                       cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
+                                   S_DS_GEN);
+               ret = 0;
+               break;
+       case HostCmd_CMD_REMAIN_ON_CHAN:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               memcpy(&cmd_ptr->params, data_buf,
+                      sizeof(struct host_cmd_ds_remain_on_chan));
+               cmd_ptr->size =
+                     cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) +
+                                 S_DS_GEN);
+               break;
+       case HostCmd_CMD_P2P_MODE_CFG:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
+               cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
+               cmd_ptr->size =
+                       cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
+                                   S_DS_GEN);
+               break;
        case HostCmd_CMD_FUNC_INIT:
                if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
                        priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1204,6 +1261,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                else if (priv->bss_mode == NL80211_IFTYPE_STATION)
                        cmd_ptr->params.bss_mode.con_type =
                                CONNECTION_TYPE_INFRA;
+               else if (priv->bss_mode == NL80211_IFTYPE_AP)
+                       cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
                cmd_ptr->size = cpu_to_le16(sizeof(struct
                                host_cmd_ds_set_bss_mode) + S_DS_GEN);
                ret = 0;
@@ -1253,35 +1312,35 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
 
        if (first_sta) {
                if (priv->adapter->iface_type == MWIFIEX_PCIE) {
-                       ret = mwifiex_send_cmd_async(priv,
+                       ret = mwifiex_send_cmd_sync(priv,
                                                HostCmd_CMD_PCIE_DESC_DETAILS,
                                                HostCmd_ACT_GEN_SET, 0, NULL);
                        if (ret)
                                return -1;
                }
 
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT,
-                                            HostCmd_ACT_GEN_SET, 0, NULL);
+               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
+                                           HostCmd_ACT_GEN_SET, 0, NULL);
                if (ret)
                        return -1;
                /* Read MAC address from HW */
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_GET_HW_SPEC,
-                                            HostCmd_ACT_GEN_GET, 0, NULL);
+               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
+                                           HostCmd_ACT_GEN_GET, 0, NULL);
                if (ret)
                        return -1;
 
                /* Reconfigure tx buf size */
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_RECONFIGURE_TX_BUFF,
-                                            HostCmd_ACT_GEN_SET, 0,
-                                            &priv->adapter->tx_buf_size);
+               ret = mwifiex_send_cmd_sync(priv,
+                                           HostCmd_CMD_RECONFIGURE_TX_BUFF,
+                                           HostCmd_ACT_GEN_SET, 0,
+                                           &priv->adapter->tx_buf_size);
                if (ret)
                        return -1;
 
                if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                        /* Enable IEEE PS by default */
                        priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
-                       ret = mwifiex_send_cmd_async(
+                       ret = mwifiex_send_cmd_sync(
                                        priv, HostCmd_CMD_802_11_PS_MODE_ENH,
                                        EN_AUTO_PS, BITMAP_STA_PS, NULL);
                        if (ret)
@@ -1290,21 +1349,21 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        }
 
        /* get tx rate */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TX_RATE_CFG,
-                                    HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
+                                   HostCmd_ACT_GEN_GET, 0, NULL);
        if (ret)
                return -1;
        priv->data_rate = 0;
 
        /* get tx power */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
-                                    HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
+                                   HostCmd_ACT_GEN_GET, 0, NULL);
        if (ret)
                return -1;
 
        if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
                /* set ibss coalescing_status */
-               ret = mwifiex_send_cmd_async(
+               ret = mwifiex_send_cmd_sync(
                                priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
                                HostCmd_ACT_GEN_SET, 0, &enable);
                if (ret)
@@ -1314,16 +1373,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
        amsdu_aggr_ctrl.enable = true;
        /* Send request to firmware */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
-                                    HostCmd_ACT_GEN_SET, 0,
-                                    &amsdu_aggr_ctrl);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
+                                   HostCmd_ACT_GEN_SET, 0,
+                                   &amsdu_aggr_ctrl);
        if (ret)
                return -1;
        /* MAC Control must be the last command in init_fw */
        /* set MAC Control */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
-                                    HostCmd_ACT_GEN_SET, 0,
-                                    &priv->curr_pkt_filter);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
+                                   HostCmd_ACT_GEN_SET, 0,
+                                   &priv->curr_pkt_filter);
        if (ret)
                return -1;
 
@@ -1332,10 +1391,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
                /* Enable auto deep sleep */
                auto_ds.auto_ds = DEEP_SLEEP_ON;
                auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_PS_MODE_ENH,
-                                            EN_AUTO_PS, BITMAP_AUTO_DS,
-                                            &auto_ds);
+               ret = mwifiex_send_cmd_sync(priv,
+                                           HostCmd_CMD_802_11_PS_MODE_ENH,
+                                           EN_AUTO_PS, BITMAP_AUTO_DS,
+                                           &auto_ds);
                if (ret)
                        return -1;
        }
@@ -1343,23 +1402,24 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                /* Send cmd to FW to enable/disable 11D function */
                state_11d = ENABLE_11D;
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                                            HostCmd_ACT_GEN_SET, DOT11D_I,
-                                            &state_11d);
+               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                                           HostCmd_ACT_GEN_SET, DOT11D_I,
+                                           &state_11d);
                if (ret)
                        dev_err(priv->adapter->dev,
                                "11D: failed to enable 11D\n");
        }
 
+       /* set last_init_cmd before sending the command */
+       priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
+
        /* Send cmd to FW to configure 11n specific configuration
         * (Short GI, Channel BW, Green field support etc.) for transmit
         */
        tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_CFG,
-                                    HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
+                                   HostCmd_ACT_GEN_SET, 0, &tx_cfg);
 
-       /* set last_init_cmd */
-       priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
        ret = -EINPROGRESS;
 
        return ret;
index 0b09004ebb25a3eebf6f5fe8f85bfd9c196f351c..e380171c4c5dd08918669863d4a756894e46a2e9 100644 (file)
@@ -123,7 +123,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
                                                &resp->params.rssi_info_rsp;
-       struct mwifiex_ds_misc_subsc_evt subsc_evt;
+       struct mwifiex_ds_misc_subsc_evt *subsc_evt =
+                                               &priv->async_subsc_evt_storage;
 
        priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
        priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -140,26 +141,27 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
        if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
                return 0;
 
+       memset(subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
+
        /* Resubscribe low and high rssi events with new thresholds */
-       memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
-       subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
-       subsc_evt.action = HostCmd_ACT_BITWISE_SET;
+       subsc_evt->events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
+       subsc_evt->action = HostCmd_ACT_BITWISE_SET;
        if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
-               subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
+               subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
                                priv->cqm_rssi_hyst);
-               subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+               subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
        } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
-               subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
-               subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
+               subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+               subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
                                priv->cqm_rssi_hyst);
        }
-       subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
-       subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
+       subsc_evt->bcn_l_rssi_cfg.evt_freq = 1;
+       subsc_evt->bcn_h_rssi_cfg.evt_freq = 1;
 
        priv->subsc_evt_rssi_state = EVENT_HANDLED;
 
        mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
-                              0, 0, &subsc_evt);
+                              0, 0, subsc_evt);
 
        return 0;
 }
@@ -651,6 +653,38 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
        return 0;
 }
 
+/*
+ * This function handles the command response of remain on channel.
+ */
+static int
+mwifiex_ret_remain_on_chan(struct mwifiex_private *priv,
+                          struct host_cmd_ds_command *resp,
+                          struct host_cmd_ds_remain_on_chan *roc_cfg)
+{
+       struct host_cmd_ds_remain_on_chan *resp_cfg = &resp->params.roc_cfg;
+
+       if (roc_cfg)
+               memcpy(roc_cfg, resp_cfg, sizeof(*roc_cfg));
+
+       return 0;
+}
+
+/*
+ * This function handles the command response of P2P mode cfg.
+ */
+static int
+mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
+                        struct host_cmd_ds_command *resp,
+                        void *data_buf)
+{
+       struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
+
+       if (data_buf)
+               *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
+
+       return 0;
+}
+
 /*
  * This function handles the command response of register access.
  *
@@ -736,7 +770,6 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
                                        &(resp->params.ibss_coalescing);
-       u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
 
        if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
                return 0;
@@ -745,7 +778,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
                "info: new BSSID %pM\n", ibss_coal_resp->bssid);
 
        /* If rsp has NULL BSSID, Just return..... No Action */
-       if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) {
+       if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
                dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
                return 0;
        }
@@ -775,8 +808,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
  * This function handles the command response for subscribe event command.
  */
 static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
-                                struct host_cmd_ds_command *resp,
-                                struct mwifiex_ds_misc_subsc_evt *sub_event)
+                                struct host_cmd_ds_command *resp)
 {
        struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
                &resp->params.subsc_evt;
@@ -786,10 +818,6 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
        dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
                le16_to_cpu(cmd_sub_event->events));
 
-       /*Return the subscribed event info for a Get request*/
-       if (sub_event)
-               sub_event->events = le16_to_cpu(cmd_sub_event->events);
-
        return 0;
 }
 
@@ -879,6 +907,13 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_VERSION_EXT:
                ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
                break;
+       case HostCmd_CMD_REMAIN_ON_CHAN:
+               ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf);
+               break;
+       case HostCmd_CMD_P2P_MODE_CFG:
+               ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
+               break;
+       case HostCmd_CMD_MGMT_FRAME_REG:
        case HostCmd_CMD_FUNC_INIT:
        case HostCmd_CMD_FUNC_SHUTDOWN:
                break;
@@ -913,7 +948,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                                le16_to_cpu(resp->params.tx_buf.mp_end_port));
                break;
        case HostCmd_CMD_AMSDU_AGGR_CTRL:
-               ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
                break;
        case HostCmd_CMD_WMM_GET_STATUS:
                ret = mwifiex_ret_wmm_get_status(priv, resp);
@@ -932,12 +966,11 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_SET_BSS_MODE:
                break;
        case HostCmd_CMD_11N_CFG:
-               ret = mwifiex_ret_11n_cfg(resp, data_buf);
                break;
        case HostCmd_CMD_PCIE_DESC_DETAILS:
                break;
        case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
-               ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
+               ret = mwifiex_ret_subsc_evt(priv, resp);
                break;
        case HostCmd_CMD_UAP_SYS_CONFIG:
                break;
index b8614a82546072a25099994768ade4bcad28e02a..aafde30e714aa97e40ecbbedc86a39fb08b26866 100644 (file)
@@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
-       int len, ret = 0;
+       int ret = 0;
        u32 eventcause = adapter->event_cause;
-       struct station_info sinfo;
-       struct mwifiex_assoc_event *event;
+       u16 ctrl;
 
        switch (eventcause) {
        case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_MIC_ERR_UNICAST:
                dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+               cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+                                            NL80211_KEYTYPE_PAIRWISE,
+                                            -1, NULL, GFP_KERNEL);
                break;
 
        case EVENT_MIC_ERR_MULTICAST:
                dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+               cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+                                            NL80211_KEYTYPE_GROUP,
+                                            -1, NULL, GFP_KERNEL);
                break;
        case EVENT_MIB_CHANGED:
        case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                              adapter->event_body);
                break;
        case EVENT_AMSDU_AGGR_CTRL:
-               dev_dbg(adapter->dev, "event:  AMSDU_AGGR_CTRL %d\n",
-                       *(u16 *) adapter->event_body);
+               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
                adapter->tx_buf_size =
-                       min(adapter->curr_tx_buf_size,
-                           le16_to_cpu(*(__le16 *) adapter->event_body));
+                               min_t(u16, adapter->curr_tx_buf_size, ctrl);
                dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
                        adapter->tx_buf_size);
                break;
@@ -405,51 +410,18 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
                break;
 
-       case EVENT_UAP_STA_ASSOC:
-               memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)
-                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
-                       len = -1;
-
-                       if (ieee80211_is_assoc_req(event->frame_control))
-                               len = 0;
-                       else if (ieee80211_is_reassoc_req(event->frame_control))
-                               /* There will be ETH_ALEN bytes of
-                                * current_ap_addr before the re-assoc ies.
-                                */
-                               len = ETH_ALEN;
-
-                       if (len != -1) {
-                               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
-                               sinfo.assoc_req_ies = &event->data[len];
-                               len = (u8 *)sinfo.assoc_req_ies -
-                                     (u8 *)&event->frame_control;
-                               sinfo.assoc_req_ies_len =
-                                       le16_to_cpu(event->len) - (u16)len;
-                       }
-               }
-               cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
-                                GFP_KERNEL);
-               break;
-       case EVENT_UAP_STA_DEAUTH:
-               cfg80211_del_sta(priv->netdev, adapter->event_body +
-                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
-               break;
-       case EVENT_UAP_BSS_IDLE:
-               priv->media_connected = false;
-               break;
-       case EVENT_UAP_BSS_ACTIVE:
-               priv->media_connected = true;
-               break;
-       case EVENT_UAP_BSS_START:
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
-               memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
-               break;
-       case EVENT_UAP_MIC_COUNTERMEASURES:
-               /* For future development */
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+       case EVENT_REMAIN_ON_CHAN_EXPIRED:
+               dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+               cfg80211_remain_on_channel_expired(priv->wdev,
+                                                  priv->roc_cfg.cookie,
+                                                  &priv->roc_cfg.chan,
+                                                  priv->roc_cfg.chan_type,
+                                                  GFP_ATOMIC);
+
+               memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
+
                break;
+
        default:
                dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
                        eventcause);
index fb2136089a2241318a0a697461dc02e0ec240dd6..0c9f70b2cbe61de8b77084fb71135bc7fda36f00 100644 (file)
@@ -26,6 +26,9 @@
 #include "11n.h"
 #include "cfg80211.h"
 
+static int disconnect_on_suspend = 1;
+module_param(disconnect_on_suspend, int, 0644);
+
 /*
  * Copies the multicast address list from device to driver.
  *
@@ -192,6 +195,44 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        return ret;
 }
 
+static int mwifiex_process_country_ie(struct mwifiex_private *priv,
+                                     struct cfg80211_bss *bss)
+{
+       u8 *country_ie, country_ie_len;
+       struct mwifiex_802_11d_domain_reg *domain_info =
+                                       &priv->adapter->domain_reg;
+
+       country_ie = (u8 *)ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+
+       if (!country_ie)
+               return 0;
+
+       country_ie_len = country_ie[1];
+       if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
+               return 0;
+
+       domain_info->country_code[0] = country_ie[2];
+       domain_info->country_code[1] = country_ie[3];
+       domain_info->country_code[2] = ' ';
+
+       country_ie_len -= IEEE80211_COUNTRY_STRING_LEN;
+
+       domain_info->no_of_triplet =
+               country_ie_len / sizeof(struct ieee80211_country_ie_triplet);
+
+       memcpy((u8 *)domain_info->triplet,
+              &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len);
+
+       if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
+               wiphy_err(priv->adapter->wiphy,
+                         "11D: setting domain info in FW\n");
+               return -1;
+       }
+
+       return 0;
+}
+
 /*
  * In Ad-Hoc mode, the IBSS is created if not found in scan list.
  * In both Ad-Hoc and infra mode, an deauthentication is performed
@@ -207,6 +248,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
        priv->scan_block = false;
 
        if (bss) {
+               mwifiex_process_country_ie(priv, bss);
+
                /* Allocate and fill new bss descriptor */
                bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
                                GFP_KERNEL);
@@ -408,6 +451,16 @@ EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
 int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
 {
        struct mwifiex_ds_hs_cfg hscfg;
+       struct mwifiex_private *priv;
+       int i;
+
+       if (disconnect_on_suspend) {
+               for (i = 0; i < adapter->priv_num; i++) {
+                       priv = adapter->priv[i];
+                       if (priv)
+                               mwifiex_deauthenticate(priv, NULL);
+               }
+       }
 
        if (adapter->hs_activated) {
                dev_dbg(adapter->dev, "cmd: HS Already actived\n");
@@ -942,20 +995,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
  * This function allocates the IOCTL request buffer, fills it
  * with requisite parameters and calls the IOCTL handler.
  */
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-                       int key_len, u8 key_index,
-                       const u8 *mac_addr, int disable)
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+                      const u8 *key, int key_len, u8 key_index,
+                      const u8 *mac_addr, int disable)
 {
        struct mwifiex_ds_encrypt_key encrypt_key;
 
        memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
        encrypt_key.key_len = key_len;
+
+       if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               encrypt_key.is_igtk_key = true;
+
        if (!disable) {
                encrypt_key.key_index = key_index;
                if (key_len)
                        memcpy(encrypt_key.key_material, key, key_len);
                if (mac_addr)
                        memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
+               if (kp && kp->seq && kp->seq_len)
+                       memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
        } else {
                encrypt_key.key_disable = true;
                if (mac_addr)
@@ -984,6 +1043,65 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
        return 0;
 }
 
+int
+mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
+                          struct ieee80211_channel *chan,
+                          enum nl80211_channel_type *ct,
+                          unsigned int duration)
+{
+       struct host_cmd_ds_remain_on_chan roc_cfg;
+       u8 sc;
+
+       memset(&roc_cfg, 0, sizeof(roc_cfg));
+       roc_cfg.action = cpu_to_le16(action);
+       if (action == HostCmd_ACT_GEN_SET) {
+               roc_cfg.band_cfg = chan->band;
+               sc = mwifiex_chan_type_to_sec_chan_offset(*ct);
+               roc_cfg.band_cfg |= (sc << 2);
+
+               roc_cfg.channel =
+                       ieee80211_frequency_to_channel(chan->center_freq);
+               roc_cfg.duration = cpu_to_le32(duration);
+       }
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
+                                 action, 0, &roc_cfg)) {
+               dev_err(priv->adapter->dev, "failed to remain on channel\n");
+               return -1;
+       }
+
+       return roc_cfg.status;
+}
+
+int
+mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
+{
+       if (GET_BSS_ROLE(priv) == bss_role) {
+               dev_dbg(priv->adapter->dev,
+                       "info: already in the desired role.\n");
+               return 0;
+       }
+
+       mwifiex_free_priv(priv);
+       mwifiex_init_priv(priv);
+
+       priv->bss_role = bss_role;
+       switch (bss_role) {
+       case MWIFIEX_BSS_ROLE_UAP:
+               priv->bss_mode = NL80211_IFTYPE_AP;
+               break;
+       case MWIFIEX_BSS_ROLE_STA:
+       case MWIFIEX_BSS_ROLE_ANY:
+       default:
+               priv->bss_mode = NL80211_IFTYPE_STATION;
+               break;
+       }
+
+       mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
+                             HostCmd_ACT_GEN_SET, 0, NULL);
+
+       return mwifiex_sta_init_cmd(priv, false);
+}
+
 /*
  * Sends IOCTL request to get statistics information.
  *
index 02ce3b77d3e772c4e4cde5615e806be500c4ef43..07d32b73783ea2cdd0859d80849eed1eb4a94804 100644 (file)
@@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
 
        local_rx_pd = (struct rxpd *) (skb->data);
 
-       rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-                               local_rx_pd->rx_pkt_offset);
+       rx_pkt_hdr = (void *)local_rx_pd +
+                    le16_to_cpu(local_rx_pd->rx_pkt_offset);
 
        if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
                    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
        struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
        struct rx_packet_hdr *rx_pkt_hdr;
        u8 ta[ETH_ALEN];
-       u16 rx_pkt_type;
+       u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
        struct mwifiex_private *priv =
                        mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
                                               rx_info->bss_type);
@@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                return -1;
 
        local_rx_pd = (struct rxpd *) (skb->data);
-       rx_pkt_type = local_rx_pd->rx_pkt_type;
+       rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
+       rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+       rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
+       seq_num = le16_to_cpu(local_rx_pd->seq_num);
 
-       rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-                                       local_rx_pd->rx_pkt_offset);
+       rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
 
-       if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
-           (u16) skb->len) {
-               dev_err(adapter->dev, "wrong rx packet: len=%d,"
-                       " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
-                      local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
+       if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
+               dev_err(adapter->dev,
+                       "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+                       skb->len, rx_pkt_offset, rx_pkt_length);
                priv->stats.rx_dropped++;
 
                if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                return ret;
        }
 
-       if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
+       if (rx_pkt_type == PKT_TYPE_AMSDU) {
                struct sk_buff_head list;
                struct sk_buff *rx_skb;
 
                __skb_queue_head_init(&list);
 
-               skb_pull(skb, local_rx_pd->rx_pkt_offset);
-               skb_trim(skb, local_rx_pd->rx_pkt_length);
+               skb_pull(skb, rx_pkt_offset);
+               skb_trim(skb, rx_pkt_length);
 
                ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
                                         priv->wdev->iftype, 0, false);
@@ -173,6 +174,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                                dev_err(adapter->dev, "Rx of A-MSDU failed");
                }
                return 0;
+       } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+               ret = mwifiex_process_mgmt_packet(adapter, skb);
+               if (ret)
+                       dev_err(adapter->dev, "Rx of mgmt packet failed");
+               dev_kfree_skb_any(skb);
+               return ret;
        }
 
        /*
@@ -189,17 +196,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
        } else {
                if (rx_pkt_type != PKT_TYPE_BAR)
-                       priv->rx_seq[local_rx_pd->priority] =
-                                               local_rx_pd->seq_num;
+                       priv->rx_seq[local_rx_pd->priority] = seq_num;
                memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
                       ETH_ALEN);
        }
 
        /* Reorder and send to OS */
-       ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
-                                            local_rx_pd->priority, ta,
-                                            (u8) local_rx_pd->rx_pkt_type,
-                                            skb);
+       ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
+                                        ta, (u8) rx_pkt_type, skb);
 
        if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
                if (adapter->if_ops.data_complete)
index 0a046d3a0c16d62effecfe17b9633369c7c6c3c8..7b581af24f5f6479ac185430be2bb35306e53c0e 100644 (file)
@@ -48,6 +48,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
        struct txpd *local_tx_pd;
        struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
        u8 pad;
+       u16 pkt_type, pkt_offset;
 
        if (!skb->len) {
                dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -55,6 +56,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
                return skb->data;
        }
 
+       pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+
        /* If skb->data is not aligned; add padding */
        pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
 
@@ -93,7 +96,14 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
        }
 
        /* Offset of actual data */
-       local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad);
+       pkt_offset = sizeof(struct txpd) + pad;
+       if (pkt_type == PKT_TYPE_MGMT) {
+               /* Set the packet type and add header for management frame */
+               local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type);
+               pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+       }
+
+       local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
 
        /* make space for INTF_HEADER_LEN */
        skb_push(skb, INTF_HEADER_LEN);
index cecb27283196150afdcecc56c451fa396456891f..2af263992e83a23ff30bb6558f85204105bb4180 100644 (file)
@@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
        rx_info->bss_num = priv->bss_num;
        rx_info->bss_type = priv->bss_type;
 
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               return mwifiex_process_uap_rx_packet(adapter, skb);
+
        return mwifiex_process_sta_rx_packet(adapter, skb);
 }
 EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -72,7 +75,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
        u8 *head_ptr;
        struct txpd *local_tx_pd = NULL;
 
-       head_ptr = mwifiex_process_sta_txpd(priv, skb);
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               head_ptr = mwifiex_process_uap_txpd(priv, skb);
+       else
+               head_ptr = mwifiex_process_sta_txpd(priv, skb);
+
        if (head_ptr) {
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
                        local_tx_pd =
@@ -157,6 +164,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                priv->stats.tx_errors++;
        }
 
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+               atomic_dec_return(&adapter->pending_bridged_pkts);
        if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
                goto done;
 
index f40e93fe894aca64702219b8223c464a526c92bf..d95a2d558fcfbb2d84950650344c1656db173351 100644 (file)
@@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        if (ht_ie) {
                memcpy(&bss_cfg->ht_cap, ht_ie + 2,
                       sizeof(struct ieee80211_ht_cap));
+               priv->ap_11n_enabled = 1;
        } else {
                memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
                bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
@@ -176,6 +177,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        return;
 }
 
+/* This function finds supported rates IE from beacon parameter and sets
+ * these rates into bss_config structure.
+ */
+void
+mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+                     struct cfg80211_ap_settings *params)
+{
+       struct ieee_types_header *rate_ie;
+       int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+       const u8 *var_pos = params->beacon.head + var_offset;
+       int len = params->beacon.head_len - var_offset;
+
+       rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
+       if (rate_ie)
+               memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
+
+       return;
+}
+
 /* This function initializes some of mwifiex_uap_bss_param variables.
  * This helps FW in ignoring invalid values. These values may or may not
  * be get updated to valid ones at later stage.
@@ -322,8 +342,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        struct host_cmd_tlv_retry_limit *retry_limit;
        struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
        struct host_cmd_tlv_auth_type *auth_type;
+       struct host_cmd_tlv_rates *tlv_rates;
+       struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
        struct mwifiex_ie_types_htcap *htcap;
        struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+       int i;
        u16 cmd_size = *param_size;
 
        if (bss_cfg->ssid.ssid_len) {
@@ -343,7 +366,23 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
                tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
        }
-       if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
+       if (bss_cfg->rates[0]) {
+               tlv_rates = (struct host_cmd_tlv_rates *)tlv;
+               tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+
+               for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
+                    i++)
+                       tlv_rates->rates[i] = bss_cfg->rates[i];
+
+               tlv_rates->tlv.len = cpu_to_le16(i);
+               cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
+               tlv += sizeof(struct host_cmd_tlv_rates) + i;
+       }
+       if (bss_cfg->channel &&
+           ((bss_cfg->band_cfg == BAND_CONFIG_BG &&
+             bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
+           (bss_cfg->band_cfg == BAND_CONFIG_A &&
+            bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
                chan_band = (struct host_cmd_tlv_channel_band *)tlv;
                chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
                chan_band->tlv.len =
@@ -459,6 +498,27 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                tlv += sizeof(struct mwifiex_ie_types_htcap);
        }
 
+       if (bss_cfg->sta_ao_timer) {
+               ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
+               ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+               ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
+                                               sizeof(struct host_cmd_tlv));
+               ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
+               cmd_size += sizeof(*ao_timer);
+               tlv += sizeof(*ao_timer);
+       }
+
+       if (bss_cfg->ps_sta_ao_timer) {
+               ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
+               ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+               ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+                                                  sizeof(struct host_cmd_tlv));
+               ps_ao_timer->sta_ao_timer =
+                                       cpu_to_le32(bss_cfg->ps_sta_ao_timer);
+               cmd_size += sizeof(*ps_ao_timer);
+               tlv += sizeof(*ps_ao_timer);
+       }
+
        *param_size = cmd_size;
 
        return 0;
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644 (file)
index 0000000..a33fa39
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Marvell Wireless LAN device driver: AP event handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "main.h"
+#include "11n.h"
+
+/*
+ * This function will return the pointer to the station entry in the
+ * station list table which matches the specified mac address.
+ *
+ * The list is walked without taking any lock here, so the caller must
+ * hold the station list spinlock (priv->sta_list_spinlock) across the
+ * call and for as long as the returned node is used.
+ *
+ * NULL is returned if @mac is NULL or if no matching entry is found in
+ * the associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+
+       if (!mac)
+               return NULL;
+
+       list_for_each_entry(node, &priv->sta_list, list) {
+               if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+                       return node;
+       }
+
+       return NULL;
+}
+
+/*
+ * This function will add a sta_node entry to the associated station
+ * list table with the given mac address.
+ * If an entry exists already, the existing entry is returned.
+ * If the received mac address is NULL, or allocation of a new entry
+ * fails, NULL is returned.
+ */
+static struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       if (!mac)
+               return NULL;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node)
+               goto done;
+
+       /* GFP_ATOMIC: we are holding sta_list_spinlock here */
+       node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
+       if (!node)
+               goto done;
+
+       memcpy(node->mac_addr, mac, ETH_ALEN);
+       list_add_tail(&node->list, &priv->sta_list);
+
+done:
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return node;
+}
+
+/*
+ * This function searches for the HT capability IE in the association
+ * request IEs and sets the station's HT parameters accordingly.
+ *
+ * If @ies is NULL or no HT IE is present, the peer is marked as
+ * non-11n and node->max_amsdu is left untouched.
+ */
+static void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+                      int ies_len, struct mwifiex_sta_node *node)
+{
+       const struct ieee80211_ht_cap *ht_cap;
+
+       if (!ies)
+               return;
+
+       ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+       if (ht_cap) {
+               node->is_11n_enabled = 1;
+               /* Peer's max A-MSDU length: capability bit set -> 8k,
+                * clear -> 4k.  Parenthesized so the mask is clearly the
+                * ternary condition.
+                */
+               node->max_amsdu = (le16_to_cpu(ht_cap->cap_info) &
+                                  IEEE80211_HT_CAP_MAX_AMSDU) ?
+                                 MWIFIEX_TX_DATA_BUF_SIZE_8K :
+                                 MWIFIEX_TX_DATA_BUF_SIZE_4K;
+       } else {
+               node->is_11n_enabled = 0;
+       }
+}
+
+/*
+ * This function deletes the station entry matching @mac (if any) from
+ * the associated station list.  Entries for other stations are left
+ * untouched.
+ *
+ * (The previous code walked the whole list and freed every node once a
+ * match was found, wiping out all associated stations on a single
+ * deauth.)
+ */
+static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node) {
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+}
+
+/*
+ * This function will delete all stations from the associated station
+ * list, freeing each node.  Called when the BSS goes idle.
+ */
+static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *node, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       /* Redundant after the safe walk above, but harmless: leaves the
+        * head in a known-empty state.
+        */
+       INIT_LIST_HEAD(&priv->sta_list);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
+
+/*
+ * This function handles AP interface specific events generated by firmware.
+ *
+ * Event specific routines are called by this function based
+ * upon the generated event cause.
+ *
+ * Events supported for AP -
+ *      - EVENT_UAP_STA_ASSOC
+ *      - EVENT_UAP_STA_DEAUTH
+ *      - EVENT_UAP_BSS_ACTIVE
+ *      - EVENT_UAP_BSS_START
+ *      - EVENT_UAP_BSS_IDLE
+ *      - EVENT_UAP_MIC_COUNTERMEASURES
+ * plus the shared 11n events (AMSDU_AGGR_CTRL, ADDBA, DELBA,
+ * BA stream timeout).
+ *
+ * Returns 0 on success, -1 if a station entry cannot be created.
+ */
+int mwifiex_process_uap_event(struct mwifiex_private *priv)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       int len, i;
+       u32 eventcause = adapter->event_cause;
+       struct station_info sinfo;
+       struct mwifiex_assoc_event *event;
+       struct mwifiex_sta_node *node;
+       u8 *deauth_mac;
+       struct host_cmd_ds_11n_batimeout *ba_timeout;
+       u16 ctrl;
+
+       switch (eventcause) {
+       case EVENT_UAP_STA_ASSOC:
+               memset(&sinfo, 0, sizeof(sinfo));
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+               if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+                       /* len is the offset of the assoc request IEs inside
+                        * event->data; -1 means "frame type not recognised,
+                        * report no IEs".
+                        */
+                       len = -1;
+
+                       if (ieee80211_is_assoc_req(event->frame_control))
+                               len = 0;
+                       else if (ieee80211_is_reassoc_req(event->frame_control))
+                               /* There will be ETH_ALEN bytes of
+                                * current_ap_addr before the re-assoc ies.
+                                */
+                               len = ETH_ALEN;
+
+                       if (len != -1) {
+                               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+                               sinfo.assoc_req_ies = &event->data[len];
+                               len = (u8 *)sinfo.assoc_req_ies -
+                                     (u8 *)&event->frame_control;
+                               sinfo.assoc_req_ies_len =
+                                       le16_to_cpu(event->len) - (u16)len;
+                       }
+               }
+               cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+                                GFP_KERNEL);
+
+               node = mwifiex_add_sta_entry(priv, event->sta_addr);
+               if (!node) {
+                       dev_warn(adapter->dev,
+                                "could not create station entry!\n");
+                       return -1;
+               }
+
+               if (!priv->ap_11n_enabled)
+                       break;
+
+               mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
+                                      sinfo.assoc_req_ies_len, node);
+
+               /* Per-TID A-MPDU permission mirrors the user table for 11n
+                * peers; otherwise BA setup is disallowed entirely.
+                */
+               for (i = 0; i < MAX_NUM_TID; i++) {
+                       if (node->is_11n_enabled)
+                               node->ampdu_sta[i] =
+                                             priv->aggr_prio_tbl[i].ampdu_user;
+                       else
+                               node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+               }
+               memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
+               break;
+       case EVENT_UAP_STA_DEAUTH:
+               deauth_mac = adapter->event_body +
+                            MWIFIEX_UAP_EVENT_EXTRA_HEADER;
+               cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
+
+               /* Tear down any rx-reorder/BA state for this peer */
+               if (priv->ap_11n_enabled) {
+                       mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
+                       mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
+               }
+               mwifiex_del_sta_entry(priv, deauth_mac);
+               break;
+       case EVENT_UAP_BSS_IDLE:
+               priv->media_connected = false;
+               mwifiex_clean_txrx(priv);
+               mwifiex_del_all_sta_list(priv);
+               break;
+       case EVENT_UAP_BSS_ACTIVE:
+               priv->media_connected = true;
+               break;
+       case EVENT_UAP_BSS_START:
+               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               /* event_body + 2 carries the BSS MAC address reported by
+                * firmware — presumably 2 bytes of header precede it;
+                * TODO confirm against the event layout in fw.h.
+                */
+               memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
+                      ETH_ALEN);
+               break;
+       case EVENT_UAP_MIC_COUNTERMEASURES:
+               /* For future development */
+               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               break;
+       case EVENT_AMSDU_AGGR_CTRL:
+               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
+               /* Firmware advertises its A-MSDU buffer size; clamp ours */
+               if (priv->media_connected) {
+                       adapter->tx_buf_size =
+                               min_t(u16, adapter->curr_tx_buf_size, ctrl);
+                       dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
+                               adapter->tx_buf_size);
+               }
+               break;
+       case EVENT_ADDBA:
+               dev_dbg(adapter->dev, "event: ADDBA Request\n");
+               if (priv->media_connected)
+                       mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              adapter->event_body);
+               break;
+       case EVENT_DELBA:
+               dev_dbg(adapter->dev, "event: DELBA Request\n");
+               if (priv->media_connected)
+                       mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
+               break;
+       case EVENT_BA_STREAM_TIEMOUT:
+               /* (sic) spelling matches the constant's definition elsewhere
+                * in the driver — do not "fix" it locally.
+                */
+               dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+               if (priv->media_connected) {
+                       ba_timeout = (void *)adapter->event_body;
+                       mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
+               }
+               break;
+       default:
+               dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
+                       eventcause);
+               break;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644 (file)
index 0000000..0966ac2
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+ * Marvell Wireless LAN device driver: AP TX and RX data handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "ioctl.h"
+#include "main.h"
+#include "wmm.h"
+#include "11n_aggr.h"
+#include "11n_rxreorder.h"
+
+/* This function re-queues a received packet onto the TX path so it can
+ * be bridged to another station in the same BSS (uAP forwarding).
+ *
+ * The rxpd (and, for 802.2/LLC/SNAP frames, the surplus header bytes)
+ * is chopped off; frames carrying an RFC 1042 SNAP header are converted
+ * to Ethernet II in place.  The packet is dropped when the pending
+ * bridged-packet threshold is reached.
+ */
+static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+                                        struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_rxpd *uap_rx_pd;
+       struct rx_packet_hdr *rx_pkt_hdr;
+       struct sk_buff *new_skb;
+       struct mwifiex_txinfo *tx_info;
+       struct ethhdr *p_ethhdr;
+       int hdr_chop;
+       struct timeval tv;
+       u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       if ((atomic_read(&adapter->pending_bridged_pkts) >=
+                                            MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+               dev_err(priv->adapter->dev,
+                       "Tx: Bridge packet limit reached. Drop packet!\n");
+               kfree_skb(skb);
+               return;
+       }
+
+       if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
+                   rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+               /* Rebuild an Ethernet II header in place: keep dest/src
+                * from the 802.3 header; the SNAP ethertype already sits
+                * exactly where h_proto belongs.  (The previous code used
+                * "(u8 *)eth_hdr", i.e. the address of the kernel
+                * eth_hdr() helper function, yielding a garbage offset.)
+                */
+               p_ethhdr = (struct ethhdr *)
+                       ((u8 *)(&rx_pkt_hdr->eth803_hdr)
+                        + sizeof(rx_pkt_hdr->eth803_hdr)
+                        + sizeof(rx_pkt_hdr->rfc1042_hdr)
+                        - sizeof(p_ethhdr->h_dest)
+                        - sizeof(p_ethhdr->h_source)
+                        - sizeof(p_ethhdr->h_proto));
+               p_ethhdr->h_proto = rx_pkt_hdr->rfc1042_hdr.snap_type;
+               /* Copy source first: its destination bytes do not overlap
+                * the original source field; then the destination address.
+                */
+               memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
+                      sizeof(p_ethhdr->h_source));
+               memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
+                      sizeof(p_ethhdr->h_dest));
+               /* Chop off the rxpd + the excess memory from the
+                * 802.2/llc/snap header that was removed.
+                */
+               hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
+       } else {
+               /* Chop off the rxpd only */
+               hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
+       }
+
+       /* Chop off the leading header bytes so that skb->data points
+        * to the start of either the reconstructed EthII frame
+        * or the 802.2/llc/snap frame.
+        */
+       skb_pull(skb, hdr_chop);
+
+       if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
+               dev_dbg(priv->adapter->dev,
+                       "data: Tx: insufficient skb headroom %d\n",
+                       skb_headroom(skb));
+               /* Insufficient skb headroom - allocate a new skb */
+               new_skb =
+                       skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+               if (unlikely(!new_skb)) {
+                       dev_err(priv->adapter->dev,
+                               "Tx: cannot allocate new_skb\n");
+                       kfree_skb(skb);
+                       priv->stats.tx_dropped++;
+                       return;
+               }
+
+               kfree_skb(skb);
+               skb = new_skb;
+               dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
+                       skb_headroom(skb));
+       }
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       tx_info->bss_num = priv->bss_num;
+       tx_info->bss_type = priv->bss_type;
+       tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       atomic_inc(&adapter->tx_pending);
+       atomic_inc(&adapter->pending_bridged_pkts);
+
+       if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
+               mwifiex_set_trans_start(priv->netdev);
+               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+       }
+}
+
+/*
+ * This function contains logic for AP packet forwarding.
+ *
+ * If a packet is multicast/broadcast, it is sent to kernel/upper layer
+ * as well as queued back to AP TX queue so that it can be sent to other
+ * associated stations.
+ * If a packet is unicast and RA is present in associated station list,
+ * it is again requeued into AP TX queue.
+ * If a packet is unicast and RA is not in associated station list,
+ * packet is forwarded to kernel to handle routing logic.
+ */
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+                                 struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_rxpd *uap_rx_pd;
+       struct rx_packet_hdr *rx_pkt_hdr;
+       u8 ra[ETH_ALEN];
+       struct sk_buff *skb_uap;
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       /* don't do packet forwarding in disconnected state */
+       if (!priv->media_connected) {
+               dev_err(adapter->dev, "drop packet in disconnected state.\n");
+               dev_kfree_skb_any(skb);
+               return 0;
+       }
+
+       memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
+
+       if (is_multicast_ether_addr(ra)) {
+               /* Copy (not clone): the bridged copy is modified on the
+                * TX path while the original continues up the stack.
+                * skb_copy() can fail under memory pressure — the old
+                * code passed a possible NULL straight into the bridging
+                * path, which dereferences it.
+                */
+               skb_uap = skb_copy(skb, GFP_ATOMIC);
+               if (likely(skb_uap))
+                       mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+               else
+                       dev_err(adapter->dev,
+                               "failed to copy skb for bridging\n");
+       } else {
+               if (mwifiex_get_sta_entry(priv, ra)) {
+                       /* Requeue Intra-BSS packet */
+                       mwifiex_uap_queue_bridged_pkt(priv, skb);
+                       return 0;
+               }
+       }
+
+       /* Forward unicast/Inter-BSS packets to kernel. */
+       return mwifiex_process_rx_packet(adapter, skb);
+}
+
+/*
+ * This function processes the packet received on AP interface.
+ *
+ * The function looks into the RxPD and performs sanity tests on the
+ * received buffer to ensure it is a valid packet before processing it
+ * further. If the packet is determined to be aggregated, it is
+ * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
+ *
+ * The completion callback is called after processing is complete.
+ *
+ * Returns 0 on success or when the packet is consumed/dropped locally,
+ * -1 if no matching interface is found, or the reorder result.
+ */
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+                                 struct sk_buff *skb)
+{
+       int ret;
+       struct uap_rxpd *uap_rx_pd;
+       struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+       struct rx_packet_hdr *rx_pkt_hdr;
+       u16 rx_pkt_type;
+       u8 ta[ETH_ALEN], pkt_type;
+       struct mwifiex_sta_node *node;
+
+       struct mwifiex_private *priv =
+                       mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+                                              rx_info->bss_type);
+
+       if (!priv)
+               return -1;
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       /* Cache the CPU-order packet type once; used several times below */
+       rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       /* Sanity check: the advertised payload must fit inside the skb */
+       if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+            le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
+               dev_err(adapter->dev,
+                       "wrong rx packet: len=%d, offset=%d, length=%d\n",
+                       skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+                       le16_to_cpu(uap_rx_pd->rx_pkt_length));
+               priv->stats.rx_dropped++;
+
+               if (adapter->if_ops.data_complete)
+                       adapter->if_ops.data_complete(adapter, skb);
+               else
+                       dev_kfree_skb_any(skb);
+
+               return 0;
+       }
+
+       if (rx_pkt_type == PKT_TYPE_AMSDU) {
+               struct sk_buff_head list;
+               struct sk_buff *rx_skb;
+
+               __skb_queue_head_init(&list);
+               skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+               skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
+
+               ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+                                        priv->wdev->iftype, 0, false);
+
+               while (!skb_queue_empty(&list)) {
+                       rx_skb = __skb_dequeue(&list);
+                       ret = mwifiex_recv_packet(adapter, rx_skb);
+                       if (ret)
+                               dev_err(adapter->dev,
+                                       "AP:Rx A-MSDU failed");
+               }
+
+               return 0;
+       } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+               ret = mwifiex_process_mgmt_packet(adapter, skb);
+               if (ret)
+                       dev_err(adapter->dev, "Rx of mgmt packet failed");
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+
+       /* Track the latest rx sequence number per TID for this peer */
+       if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+               node = mwifiex_get_sta_entry(priv, ta);
+               if (node)
+                       node->rx_seq[uap_rx_pd->priority] =
+                                               le16_to_cpu(uap_rx_pd->seq_num);
+       }
+
+       /* No 11n / no reorder table for this TA+TID: bypass reordering */
+       if (!priv->ap_11n_enabled ||
+           (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
+           (rx_pkt_type != PKT_TYPE_AMSDU))) {
+               ret = mwifiex_handle_uap_rx_forward(priv, skb);
+               return ret;
+       }
+
+       /* Reorder and send to kernel */
+       pkt_type = (u8)rx_pkt_type;
+       ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
+                                        uap_rx_pd->priority, ta, pkt_type,
+                                        skb);
+
+       if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
+               if (adapter->if_ops.data_complete)
+                       adapter->if_ops.data_complete(adapter, skb);
+               else
+                       dev_kfree_skb_any(skb);
+       }
+
+       if (ret)
+               priv->stats.rx_dropped++;
+
+       return ret;
+}
+
+/*
+ * This function fills the TxPD for AP tx packets.
+ *
+ * The Tx buffer received by this function should already have the
+ * header space allocated for TxPD.
+ *
+ * This function inserts the TxPD in between interface header and actual
+ * data and adjusts the buffer pointers accordingly.
+ *
+ * The following TxPD fields are set by this function, as required -
+ *      - BSS number
+ *      - Tx packet length and offset
+ *      - Priority
+ *      - Packet delay
+ *      - Priority specific Tx control
+ *      - Flags
+ *
+ * Returns skb->data (start of the interface header).
+ */
+void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
+                              struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_txpd *txpd;
+       struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+       int pad, len;
+       u16 pkt_type;
+
+       if (!skb->len) {
+               dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+               tx_info->status_code = -1;
+               return skb->data;
+       }
+
+       pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+
+       /* If skb->data is not 4-byte aligned, pad the TxPD so the payload
+        * keeps its alignment.  (Replaces the non-standard GNU
+        * "pointer - NULL" idiom with an explicit integer cast; the
+        * arithmetic is identical.)
+        */
+       pad = (4 - ((unsigned long)skb->data & 0x3)) % 4;
+
+       len = sizeof(*txpd) + pad;
+
+       BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN);
+
+       skb_push(skb, len);
+
+       txpd = (struct uap_txpd *)skb->data;
+       memset(txpd, 0, sizeof(*txpd));
+       txpd->bss_num = priv->bss_num;
+       txpd->bss_type = priv->bss_type;
+       txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
+
+       txpd->priority = (u8)skb->priority;
+       txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+
+       if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
+               /*
+                * Set the priority specific tx_control field, setting of 0 will
+                * cause the default value to be used later in this function.
+                */
+               txpd->tx_control =
+                   cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
+
+       /* Offset of actual data */
+       if (pkt_type == PKT_TYPE_MGMT) {
+               /* Set the packet type and add header for management frame */
+               txpd->tx_pkt_type = cpu_to_le16(pkt_type);
+               len += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+       }
+
+       txpd->tx_pkt_offset = cpu_to_le16(len);
+
+       /* make space for INTF_HEADER_LEN */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       if (!txpd->tx_control)
+               /* TxCtrl set by user or default */
+               txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
+
+       return skb->data;
+}
index 2864c74bdb6fc8b264815a57c039564fe9cbf0a0..ae88f80cf86b966238dff0544fa97ff7ca83e168 100644 (file)
@@ -141,6 +141,46 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
        return 0;
 }
 
+/*
+ * This function processes a received management frame and delivers it
+ * to the kernel via cfg80211_rx_mgmt().
+ *
+ * The rxpd and the 2-byte length word preceding the frame are stripped,
+ * then the 802.11 header is compacted by removing address4 (presumably
+ * frames arrive in 4-address format — TODO confirm against firmware
+ * documentation).
+ *
+ * Returns 0 on success, -1 if @skb is NULL or no matching interface
+ * is found.
+ */
+int
+mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+                           struct sk_buff *skb)
+{
+       struct rxpd *rx_pd;
+       struct mwifiex_private *priv;
+       u16 pkt_len;
+
+       if (!skb)
+               return -1;
+
+       rx_pd = (struct rxpd *)skb->data;
+       priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
+       if (!priv)
+               return -1;
+
+       /* Skip the rxpd, then the 2-byte length field in front of the
+        * 802.11 header.
+        */
+       skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
+       skb_pull(skb, sizeof(pkt_len));
+
+       pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+
+       /* Remove address4 */
+       memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
+               skb->data + sizeof(struct ieee80211_hdr),
+               pkt_len - sizeof(struct ieee80211_hdr));
+
+       pkt_len -= ETH_ALEN + sizeof(pkt_len);
+       rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+
+       /* NOTE(review): reports the frame on the remain-on-channel
+        * frequency; verify this is also correct for uAP operation.
+        */
+       cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
+                        CAL_RSSI(rx_pd->snr, rx_pd->nf),
+                        skb->data, pkt_len, GFP_ATOMIC);
+
+       return 0;
+}
+
 /*
  * This function processes the received packet before sending it to the
  * kernel.
index 3fa4d417699381225e853a56238e0d8506a2f99b..600d8194610e3b0c3c7d6c87a0400a5b290514a4 100644 (file)
@@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
        return ra_list;
 }
 
+/* This function returns a pseudo-random number, used as the threshold
+ * for the number of packets after which BA setup is initiated, in the
+ * range [BA_SETUP_PACKET_OFFSET,
+ *        BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1].
+ * (The old "between 16 and 32" wording only holds for particular values
+ * of those constants.)  The time of day supplies cheap entropy; this is
+ * not cryptographic and does not need to be.
+ */
+static u8 mwifiex_get_random_ba_threshold(void)
+{
+       u32 sec, usec;
+       struct timeval ba_tstamp;
+       u8 ba_threshold;
+
+       /* setup ba_packet_threshold here random number between
+        * [BA_SETUP_PACKET_OFFSET,
+        * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
+        */
+
+       do_gettimeofday(&ba_tstamp);
+       sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
+       usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
+       ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
+                                                     + BA_SETUP_PACKET_OFFSET;
+
+       return ba_threshold;
+}
+
 /*
  * This function allocates and adds a RA list for all TIDs
  * with the given RA.
@@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
        int i;
        struct mwifiex_ra_list_tbl *ra_list;
        struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, ra);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 
        for (i = 0; i < MAX_NUM_TID; ++i) {
                ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
                if (!ra_list)
                        break;
 
-               if (!mwifiex_queuing_ra_based(priv))
+               ra_list->is_11n_enabled = 0;
+               if (!mwifiex_queuing_ra_based(priv)) {
                        ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
-               else
-                       ra_list->is_11n_enabled = false;
+               } else {
+                       ra_list->is_11n_enabled =
+                                     mwifiex_is_sta_11n_enabled(priv, node);
+                       if (ra_list->is_11n_enabled)
+                               ra_list->max_amsdu = node->max_amsdu;
+               }
 
                dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
                        ra_list, ra_list->is_11n_enabled);
 
+               if (ra_list->is_11n_enabled) {
+                       ra_list->pkt_count = 0;
+                       ra_list->ba_packet_thr =
+                                             mwifiex_get_random_ba_threshold();
+               }
                list_add_tail(&ra_list->list,
                              &priv->wmm.tid_tbl_ptr[i].ra_list);
 
@@ -423,7 +462,7 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
        for (i = 0; i < adapter->priv_num; ++i) {
                priv = adapter->priv[i];
                if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
-                               return false;
+                       return false;
        }
 
        return true;
@@ -609,7 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        u8 ra[ETH_ALEN], tid_down;
        unsigned long flags;
 
-       if (!priv->media_connected) {
+       if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
                dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
                mwifiex_write_data_complete(adapter, skb, -1);
                return;
@@ -624,7 +663,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        /* In case of infra as we have already created the list during
           association we just don't have to call get_queue_raptr, we will
           have only 1 raptr for a tid in case of infra */
-       if (!mwifiex_queuing_ra_based(priv)) {
+       if (!mwifiex_queuing_ra_based(priv) &&
+           !mwifiex_is_skb_mgmt_frame(skb)) {
                if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
                        ra_list = list_first_entry(
                                &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
@@ -633,7 +673,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                        ra_list = NULL;
        } else {
                memcpy(ra, skb->data, ETH_ALEN);
-               if (ra[0] & 0x01)
+               if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
                        memset(ra, 0xff, ETH_ALEN);
                ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
        }
@@ -647,6 +687,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        skb_queue_tail(&ra_list->skb_head, skb);
 
        ra_list->total_pkts_size += skb->len;
+       ra_list->pkt_count++;
 
        atomic_inc(&priv->wmm.tx_pkts_queued);
 
@@ -867,17 +908,16 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
                if (adapter->bss_prio_tbl[j].bss_prio_cur ==
                    (struct mwifiex_bss_prio_node *)
                    &adapter->bss_prio_tbl[j].bss_prio_head) {
-                       bssprio_node =
+                       adapter->bss_prio_tbl[j].bss_prio_cur =
                                list_first_entry(&adapter->bss_prio_tbl[j]
                                                 .bss_prio_head,
                                                 struct mwifiex_bss_prio_node,
                                                 list);
-                       bssprio_head = bssprio_node;
-               } else {
-                       bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
-                       bssprio_head = bssprio_node;
                }
 
+               bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
+               bssprio_head = bssprio_node;
+
                do {
                        priv_tmp = bssprio_node->priv;
                        hqp = &priv_tmp->wmm.highest_queued_prio;
@@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
 {
        int count = 0, total_size = 0;
        struct sk_buff *skb, *tmp;
+       int max_amsdu_size;
+
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
+           ptr->is_11n_enabled)
+               max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
+       else
+               max_amsdu_size = max_buf_size;
 
        skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
                total_size += skb->len;
-               if (total_size >= max_buf_size)
+               if (total_size >= max_amsdu_size)
                        break;
                if (++count >= MIN_NUM_AMSDU)
                        return true;
@@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
                skb_queue_tail(&ptr->skb_head, skb);
 
                ptr->total_pkts_size += skb->len;
+               ptr->pkt_count++;
                tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
@@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                /* ra_list_spinlock has been freed in
                   mwifiex_send_single_packet() */
        } else {
-               if (mwifiex_is_ampdu_allowed(priv, tid)) {
+               if (mwifiex_is_ampdu_allowed(priv, tid) &&
+                   ptr->pkt_count > ptr->ba_packet_thr) {
                        if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
                                mwifiex_create_ba_tbl(priv, ptr->ra, tid,
                                                      BA_SETUP_INPROGRESS);
index 224e03ade145cbbcfbd597095948c70d2a1e9beb..5099e5375cb39ed8e22a7382f6a42bc939c47fb7 100644 (file)
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
 }
 
 static void
-mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
+mwl8k_txq_xmit(struct ieee80211_hw *hw,
+              int index,
+              struct ieee80211_sta *sta,
+              struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        struct ieee80211_tx_info *tx_info;
        struct mwl8k_vif *mwl8k_vif;
-       struct ieee80211_sta *sta;
        struct ieee80211_hdr *wh;
        struct mwl8k_tx_queue *txq;
        struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
        tx_info = IEEE80211_SKB_CB(skb);
-       sta = tx_info->control.sta;
        mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
 
        if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        tx->pkt_phys_addr = cpu_to_le32(dma);
        tx->pkt_len = cpu_to_le16(skb->len);
        tx->rate_info = 0;
-       if (!priv->ap_fw && tx_info->control.sta != NULL)
-               tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+       if (!priv->ap_fw && sta != NULL)
+               tx->peer_id = MWL8K_STA(sta)->peer_id;
        else
                tx->peer_id = 0;
 
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
 /*
  * Core driver operations.
  */
-static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                return;
        }
 
-       mwl8k_txq_xmit(hw, index, skb);
+       mwl8k_txq_xmit(hw, index, control->sta, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
index 33747e131a968e19f409de68e4edc92b6b2063e5..3b5508f982e80b8376d6bcb20c63dae6fd5586e0 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/if_arp.h>
 #include <linux/wireless.h>
 #include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
 #include <net/cfg80211.h>
 #include <net/cfg80211-wext.h>
@@ -159,15 +160,13 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
        struct orinoco_private *priv = ndev_priv(dev);
        int err = -EINPROGRESS;         /* Call commit handler */
        unsigned long flags;
-       static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-       static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
        if (orinoco_lock(priv, &flags) != 0)
                return -EBUSY;
 
        /* Enable automatic roaming - no sanity checks are needed */
-       if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 ||
-           memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) {
+       if (is_zero_ether_addr(ap_addr->sa_data) ||
+           is_broadcast_ether_addr(ap_addr->sa_data)) {
                priv->bssid_fixed = 0;
                memset(priv->desired_bssid, 0, ETH_ALEN);
 
index 14037092ba89da99ddf5481a56f730ac6866aa25..1ef1bfe6a9d7845822cef3413e680de03c2774c9 100644 (file)
@@ -76,6 +76,7 @@ struct p54_channel_entry {
        u16 freq;
        u16 data;
        int index;
+       int max_power;
        enum ieee80211_band band;
 };
 
@@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
        for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
                           (i < list->entries); i++) {
                struct p54_channel_entry *chan = &list->channels[i];
+               struct ieee80211_channel *dest = &tmp->channels[j];
 
                if (chan->band != band)
                        continue;
@@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
                        continue;
                }
 
-               tmp->channels[j].band = chan->band;
-               tmp->channels[j].center_freq = chan->freq;
+               dest->band = chan->band;
+               dest->center_freq = chan->freq;
+               dest->max_power = chan->max_power;
                priv->survey[*chan_num].channel = &tmp->channels[j];
                priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
                        SURVEY_INFO_CHANNEL_TIME |
                        SURVEY_INFO_CHANNEL_TIME_BUSY |
                        SURVEY_INFO_CHANNEL_TIME_TX;
-               tmp->channels[j].hw_value = (*chan_num);
+               dest->hw_value = (*chan_num);
                j++;
                (*chan_num)++;
        }
@@ -229,10 +232,11 @@ err_out:
        return ret;
 }
 
-static void p54_update_channel_param(struct p54_channel_list *list,
-                                    u16 freq, u16 data)
+static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
+                                                         u16 freq, u16 data)
 {
-       int band, i;
+       int i;
+       struct p54_channel_entry *entry = NULL;
 
        /*
         * usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list,
         */
        for (i = list->entries; i >= 0; i--) {
                if (freq == list->channels[i].freq) {
-                       list->channels[i].data |= data;
+                       entry = &list->channels[i];
                        break;
                }
        }
 
        if ((i < 0) && (list->entries < list->max_entries)) {
                /* entry does not exist yet. Initialize a new one. */
-               band = p54_get_band_from_freq(freq);
+               int band = p54_get_band_from_freq(freq);
 
                /*
                 * filter out frequencies which don't belong into
                 * any supported band.
                 */
-               if (band < 0)
-                       return ;
+               if (band >= 0) {
+                       i = list->entries++;
+                       list->band_channel_num[band]++;
+
+                       entry = &list->channels[i];
+                       entry->freq = freq;
+                       entry->band = band;
+                       entry->index = ieee80211_frequency_to_channel(freq);
+                       entry->max_power = 0;
+                       entry->data = 0;
+               }
+       }
 
-               i = list->entries++;
-               list->band_channel_num[band]++;
+       if (entry)
+               entry->data |= data;
 
-               list->channels[i].freq = freq;
-               list->channels[i].data = data;
-               list->channels[i].band = band;
-               list->channels[i].index = ieee80211_frequency_to_channel(freq);
-               /* TODO: parse output_limit and fill max_power */
+       return entry;
+}
+
+static int p54_get_maxpower(struct p54_common *priv, void *data)
+{
+       switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
+       case PDR_SYNTH_FRONTEND_LONGBOW: {
+               struct pda_channel_output_limit_longbow *pda = data;
+               int j;
+               u16 rawpower = 0;
+               pda = data;
+               for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
+                       struct pda_channel_output_limit_point_longbow *point =
+                               &pda->point[j];
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_qpsk));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_bpsk));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_16qam));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_64qam));
+               }
+               /* longbow seems to use 1/16 dBm units */
+               return rawpower / 16;
+               }
+
+       case PDR_SYNTH_FRONTEND_DUETTE3:
+       case PDR_SYNTH_FRONTEND_DUETTE2:
+       case PDR_SYNTH_FRONTEND_FRISBEE:
+       case PDR_SYNTH_FRONTEND_XBOW: {
+               struct pda_channel_output_limit *pda = data;
+               u8 rawpower = 0;
+               rawpower = max(rawpower, pda->val_qpsk);
+               rawpower = max(rawpower, pda->val_bpsk);
+               rawpower = max(rawpower, pda->val_16qam);
+               rawpower = max(rawpower, pda->val_64qam);
+               /* raw values are in 1/4 dBm units */
+               return rawpower / 4;
+               }
+
+       default:
+               return 20;
        }
 }
 
@@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
                }
 
                if (i < priv->output_limit->entries) {
-                       freq = le16_to_cpup((__le16 *) (i *
-                                           priv->output_limit->entry_size +
-                                           priv->output_limit->offset +
-                                           priv->output_limit->data));
-
-                       p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
+                       struct p54_channel_entry *tmp;
+
+                       void *data = (void *) ((unsigned long) i *
+                               priv->output_limit->entry_size +
+                               priv->output_limit->offset +
+                               priv->output_limit->data);
+
+                       freq = le16_to_cpup((__le16 *) data);
+                       tmp = p54_update_channel_param(list, freq,
+                                                      CHAN_HAS_LIMIT);
+                       if (tmp) {
+                               tmp->max_power = p54_get_maxpower(priv, data);
+                       }
                }
 
                if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@ good_eeprom:
                goto err;
        }
 
+       priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
+
        err = p54_generate_channel_lists(dev);
        if (err)
                goto err;
 
-       priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
        if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
                p54_init_xbow_synth(priv);
        if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
index afde72b8460652dfa1fa3a475b1d35d9f50a04c4..20ebe39a3f4e8714d49cf8ff58478f87f6e4dbde 100644 (file)
@@ -57,6 +57,18 @@ struct pda_channel_output_limit {
        u8 rate_set_size;
 } __packed;
 
+struct pda_channel_output_limit_point_longbow {
+       __le16 val_bpsk;
+       __le16 val_qpsk;
+       __le16 val_16qam;
+       __le16 val_64qam;
+} __packed;
+
+struct pda_channel_output_limit_longbow {
+       __le16 freq;
+       struct pda_channel_output_limit_point_longbow point[3];
+} __packed;
+
 struct pda_pa_curve_data_sample_rev0 {
        u8 rf_power;
        u8 pa_detector;
index 3d8d622bec55d394543cf1a563b2163573e3b4f8..de1d46bf97dffc50836e813d4dc4edfc36f6a27a 100644 (file)
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
index 7cffea795ad27d0044e777a0a04e02e741793ea9..aadda99989c007838faf7eb5ca816d18904ea91c 100644 (file)
@@ -139,6 +139,7 @@ static int p54_beacon_format_ie_tim(struct sk_buff *skb)
 static int p54_beacon_update(struct p54_common *priv,
                        struct ieee80211_vif *vif)
 {
+       struct ieee80211_tx_control control = { };
        struct sk_buff *beacon;
        int ret;
 
@@ -158,7 +159,7 @@ static int p54_beacon_update(struct p54_common *priv,
         * to cancel the old beacon template by hand, instead the firmware
         * will release the previous one through the feedback mechanism.
         */
-       p54_tx_80211(priv->hw, beacon);
+       p54_tx_80211(priv->hw, &control, beacon);
        priv->tsf_high32 = 0;
        priv->tsf_low32 = 0;
 
@@ -514,6 +515,17 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
        if (modparam_nohwcrypt)
                return -EOPNOTSUPP;
 
+       if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+               /*
+                * Unfortunately most/all firmwares are trying to decrypt
+                * incoming management frames if a suitable key can be found.
+                * However, in doing so the data in these frames gets
+                * corrupted. So, we can't have firmware supported crypto
+                * offload in this case.
+                */
+               return -EOPNOTSUPP;
+       }
+
        mutex_lock(&priv->conf_mutex);
        if (cmd == SET_KEY) {
                switch (key->cipher) {
@@ -737,6 +749,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
                     IEEE80211_HW_SIGNAL_DBM |
                     IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_PS_NULLFUNC_STACK |
+                    IEEE80211_HW_MFP_CAPABLE |
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
        dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
index 89318adc8c7f0427ec8e95caed78a36312a934b8..b4390797d78c1c6917d4ea81e0ddf2f269fbc46f 100644 (file)
@@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev)
        return 0;
 }
 
+static void p54p_firmware_step2(const struct firmware *fw,
+                               void *context)
+{
+       struct p54p_priv *priv = context;
+       struct ieee80211_hw *dev = priv->common.hw;
+       struct pci_dev *pdev = priv->pdev;
+       int err;
+
+       if (!fw) {
+               dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
+               err = -ENOENT;
+               goto out;
+       }
+
+       priv->firmware = fw;
+
+       err = p54p_open(dev);
+       if (err)
+               goto out;
+       err = p54_read_eeprom(dev);
+       p54p_stop(dev);
+       if (err)
+               goto out;
+
+       err = p54_register_common(dev, &pdev->dev);
+       if (err)
+               goto out;
+
+out:
+
+       complete(&priv->fw_loaded);
+
+       if (err) {
+               struct device *parent = pdev->dev.parent;
+
+               if (parent)
+                       device_lock(parent);
+
+               /*
+                * This will indirectly result in a call to p54p_remove.
+                * Hence, we don't need to bother with freeing any
+                * allocated ressources at all.
+                */
+               device_release_driver(&pdev->dev);
+
+               if (parent)
+                       device_unlock(parent);
+       }
+
+       pci_dev_put(pdev);
+}
+
 static int __devinit p54p_probe(struct pci_dev *pdev,
                                const struct pci_device_id *id)
 {
@@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        unsigned long mem_addr, mem_len;
        int err;
 
+       pci_dev_get(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        priv = dev->priv;
        priv->pdev = pdev;
 
+       init_completion(&priv->fw_loaded);
        SET_IEEE80211_DEV(dev, &pdev->dev);
        pci_set_drvdata(pdev, dev);
 
@@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        spin_lock_init(&priv->lock);
        tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
 
-       err = request_firmware(&priv->firmware, "isl3886pci",
-                              &priv->pdev->dev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
-               err = request_firmware(&priv->firmware, "isl3886",
-                                      &priv->pdev->dev);
-               if (err)
-                       goto err_free_common;
-       }
-
-       err = p54p_open(dev);
-       if (err)
-               goto err_free_common;
-       err = p54_read_eeprom(dev);
-       p54p_stop(dev);
-       if (err)
-               goto err_free_common;
-
-       err = p54_register_common(dev, &pdev->dev);
-       if (err)
-               goto err_free_common;
-
-       return 0;
+       err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
+                                     &priv->pdev->dev, GFP_KERNEL,
+                                     priv, p54p_firmware_step2);
+       if (!err)
+               return 0;
 
- err_free_common:
-       release_firmware(priv->firmware);
        pci_free_consistent(pdev, sizeof(*priv->ring_control),
                            priv->ring_control, priv->ring_control_dma);
 
@@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        pci_release_regions(pdev);
  err_disable_dev:
        pci_disable_device(pdev);
+       pci_dev_put(pdev);
        return err;
 }
 
@@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
        if (!dev)
                return;
 
-       p54_unregister_common(dev);
        priv = dev->priv;
+       wait_for_completion(&priv->fw_loaded);
+       p54_unregister_common(dev);
        release_firmware(priv->firmware);
        pci_free_consistent(pdev, sizeof(*priv->ring_control),
                            priv->ring_control, priv->ring_control_dma);
index 7aa509f7e387c052c31ca56d9a159b0dbc8d1743..68405c142f973d356a8847c72cafb5d3fdc6f6fe 100644 (file)
@@ -105,6 +105,7 @@ struct p54p_priv {
        struct sk_buff *tx_buf_data[32];
        struct sk_buff *tx_buf_mgmt[4];
        struct completion boot_comp;
+       struct completion fw_loaded;
 };
 
 #endif /* P54USB_H */
index f38786e02623e04c5ca67f20c3ee43b009aa98d1..5861e13a6fd8d5f9aa83c24892acfc11a210b02e 100644 (file)
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(p54_rx);
 
 static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
-                               struct ieee80211_tx_info *info, u8 *queue,
-                               u32 *extra_len, u16 *flags, u16 *aid,
+                               struct ieee80211_tx_info *info,
+                               struct ieee80211_sta *sta,
+                               u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
                                bool *burst_possible)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
                        }
                }
 
-               if (info->control.sta)
-                       *aid = info->control.sta->aid;
+               if (sta)
+                       *aid = sta->aid;
                break;
        }
 }
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
        }
 }
 
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb)
 {
        struct p54_common *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
        u8 nrates = 0, nremaining = 8;
        bool burst_allowed = false;
 
-       p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
+       p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
                            &hdr_flags, &aid, &burst_allowed);
 
        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
index 7a4ae9ee1c63057b78d582fd3a94964a6fdea264..bd1f0cb56085ef94eb5e634a7467f1c12e3a3ffb 100644 (file)
@@ -1959,9 +1959,6 @@ static int rndis_scan(struct wiphy *wiphy,
         */
        rndis_check_bssid_list(usbdev, NULL, NULL);
 
-       if (!request)
-               return -EINVAL;
-
        if (priv->scan_request && priv->scan_request != request)
                return -EBUSY;
 
index 64328af496f598bb3280784b6d2adfd25ec5cc70..e3a2d9070cf655acdfb9758a525c5cc7a71575a9 100644 (file)
@@ -205,7 +205,7 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
-       return rt2x00_get_field32(reg, GPIOCSR_BIT0);
+       return rt2x00_get_field32(reg, GPIOCSR_VAL0);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1629,7 +1629,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
-       rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
+       rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
        rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
 
        /*
@@ -1789,7 +1789,6 @@ static const struct data_queue_desc rt2400pci_queue_atim = {
 
 static const struct rt2x00_ops rt2400pci_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 1,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 7564ae992b735179b15e24a3d616c5a71acb1aeb..e4b07f0aa3cc0bfd955c1cebfcc1e1443065a4f9 100644 (file)
 
 /*
  * GPIOCSR: GPIO control register.
+ *     GPIOCSR_VALx: Actual GPIO pin x value
+ *     GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
  */
 #define GPIOCSR                                0x0120
-#define GPIOCSR_BIT0                   FIELD32(0x00000001)
-#define GPIOCSR_BIT1                   FIELD32(0x00000002)
-#define GPIOCSR_BIT2                   FIELD32(0x00000004)
-#define GPIOCSR_BIT3                   FIELD32(0x00000008)
-#define GPIOCSR_BIT4                   FIELD32(0x00000010)
-#define GPIOCSR_BIT5                   FIELD32(0x00000020)
-#define GPIOCSR_BIT6                   FIELD32(0x00000040)
-#define GPIOCSR_BIT7                   FIELD32(0x00000080)
-#define GPIOCSR_BIT8                   FIELD32(0x00000100)
+#define GPIOCSR_VAL0                   FIELD32(0x00000001)
+#define GPIOCSR_VAL1                   FIELD32(0x00000002)
+#define GPIOCSR_VAL2                   FIELD32(0x00000004)
+#define GPIOCSR_VAL3                   FIELD32(0x00000008)
+#define GPIOCSR_VAL4                   FIELD32(0x00000010)
+#define GPIOCSR_VAL5                   FIELD32(0x00000020)
+#define GPIOCSR_VAL6                   FIELD32(0x00000040)
+#define GPIOCSR_VAL7                   FIELD32(0x00000080)
+#define GPIOCSR_DIR0                   FIELD32(0x00000100)
+#define GPIOCSR_DIR1                   FIELD32(0x00000200)
+#define GPIOCSR_DIR2                   FIELD32(0x00000400)
+#define GPIOCSR_DIR3                   FIELD32(0x00000800)
+#define GPIOCSR_DIR4                   FIELD32(0x00001000)
+#define GPIOCSR_DIR5                   FIELD32(0x00002000)
+#define GPIOCSR_DIR6                   FIELD32(0x00004000)
+#define GPIOCSR_DIR7                   FIELD32(0x00008000)
 
 /*
  * BBPPCSR: BBP Pin control register.
index 3de0406735f6b7347b46cdf2305e413aaa17256d..479d756e275b388fc5d17b77bde4303e27117e48 100644 (file)
@@ -205,7 +205,7 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
-       return rt2x00_get_field32(reg, GPIOCSR_BIT0);
+       return rt2x00_get_field32(reg, GPIOCSR_VAL0);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2081,7 +2081,6 @@ static const struct data_queue_desc rt2500pci_queue_atim = {
 
 static const struct rt2x00_ops rt2500pci_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 1,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 2aad7ba8a10083547c8e39d4cba5dc77dcbc42d7..9c10068e4987a384019301542f664bd44f1b130a 100644 (file)
 
 /*
  * GPIOCSR: GPIO control register.
+ *     GPIOCSR_VALx: GPIO value
+ *     GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
  */
 #define GPIOCSR                                0x0120
-#define GPIOCSR_BIT0                   FIELD32(0x00000001)
-#define GPIOCSR_BIT1                   FIELD32(0x00000002)
-#define GPIOCSR_BIT2                   FIELD32(0x00000004)
-#define GPIOCSR_BIT3                   FIELD32(0x00000008)
-#define GPIOCSR_BIT4                   FIELD32(0x00000010)
-#define GPIOCSR_BIT5                   FIELD32(0x00000020)
-#define GPIOCSR_BIT6                   FIELD32(0x00000040)
-#define GPIOCSR_BIT7                   FIELD32(0x00000080)
+#define GPIOCSR_VAL0                   FIELD32(0x00000001)
+#define GPIOCSR_VAL1                   FIELD32(0x00000002)
+#define GPIOCSR_VAL2                   FIELD32(0x00000004)
+#define GPIOCSR_VAL3                   FIELD32(0x00000008)
+#define GPIOCSR_VAL4                   FIELD32(0x00000010)
+#define GPIOCSR_VAL5                   FIELD32(0x00000020)
+#define GPIOCSR_VAL6                   FIELD32(0x00000040)
+#define GPIOCSR_VAL7                   FIELD32(0x00000080)
 #define GPIOCSR_DIR0                   FIELD32(0x00000100)
 #define GPIOCSR_DIR1                   FIELD32(0x00000200)
 #define GPIOCSR_DIR2                   FIELD32(0x00000400)
index 89fee311d8fda5ad07ae5ecd50fae567232aa35d..a12e84f892be1d9b13d1762a0369a7841af32645 100644 (file)
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u16 reg;
 
        rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
-       return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
+       return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1786,7 +1786,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
-       rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
+       rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
        rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
 
        /*
@@ -1896,7 +1896,6 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
 
 static const struct rt2x00_ops rt2500usb_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 1,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 196bd5103e4f5450483ce1e60449021bf6eafd2c..1b91a4cef9652c22fe336c2c889ec300d04d685f 100644 (file)
 
 /*
  * MAC_CSR19: GPIO control register.
+ *     MAC_CSR19_VALx: GPIO value
+ *     MAC_CSR19_DIRx: GPIO direction: 0 = input; 1 = output
  */
 #define MAC_CSR19                      0x0426
-#define MAC_CSR19_BIT0                 FIELD16(0x0001)
-#define MAC_CSR19_BIT1                 FIELD16(0x0002)
-#define MAC_CSR19_BIT2                 FIELD16(0x0004)
-#define MAC_CSR19_BIT3                 FIELD16(0x0008)
-#define MAC_CSR19_BIT4                 FIELD16(0x0010)
-#define MAC_CSR19_BIT5                 FIELD16(0x0020)
-#define MAC_CSR19_BIT6                 FIELD16(0x0040)
-#define MAC_CSR19_BIT7                 FIELD16(0x0080)
-#define MAC_CSR19_BIT8                 FIELD16(0x0100)
+#define MAC_CSR19_VAL0                 FIELD16(0x0001)
+#define MAC_CSR19_VAL1                 FIELD16(0x0002)
+#define MAC_CSR19_VAL2                 FIELD16(0x0004)
+#define MAC_CSR19_VAL3                 FIELD16(0x0008)
+#define MAC_CSR19_VAL4                 FIELD16(0x0010)
+#define MAC_CSR19_VAL5                 FIELD16(0x0020)
+#define MAC_CSR19_VAL6                 FIELD16(0x0040)
+#define MAC_CSR19_VAL7                 FIELD16(0x0080)
+#define MAC_CSR19_DIR0                 FIELD16(0x0100)
+#define MAC_CSR19_DIR1                 FIELD16(0x0200)
+#define MAC_CSR19_DIR2                 FIELD16(0x0400)
+#define MAC_CSR19_DIR3                 FIELD16(0x0800)
+#define MAC_CSR19_DIR4                 FIELD16(0x1000)
+#define MAC_CSR19_DIR5                 FIELD16(0x2000)
+#define MAC_CSR19_DIR6                 FIELD16(0x4000)
+#define MAC_CSR19_DIR7                 FIELD16(0x8000)
 
 /*
  * MAC_CSR20: LED control register.
index e252e9bafd0e2776075a8159d74e4f9180b87e83..6d67c3ede6513fd4e1be870bbf117cb2d69138dc 100644 (file)
 #define WMM_TXOP1_CFG_AC3TXOP          FIELD32(0xffff0000)
 
 /*
- * GPIO_CTRL_CFG:
- * GPIOD: GPIO direction, 0: Output, 1: Input
- */
-#define GPIO_CTRL_CFG                  0x0228
-#define GPIO_CTRL_CFG_BIT0             FIELD32(0x00000001)
-#define GPIO_CTRL_CFG_BIT1             FIELD32(0x00000002)
-#define GPIO_CTRL_CFG_BIT2             FIELD32(0x00000004)
-#define GPIO_CTRL_CFG_BIT3             FIELD32(0x00000008)
-#define GPIO_CTRL_CFG_BIT4             FIELD32(0x00000010)
-#define GPIO_CTRL_CFG_BIT5             FIELD32(0x00000020)
-#define GPIO_CTRL_CFG_BIT6             FIELD32(0x00000040)
-#define GPIO_CTRL_CFG_BIT7             FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_GPIOD_BIT0       FIELD32(0x00000100)
-#define GPIO_CTRL_CFG_GPIOD_BIT1       FIELD32(0x00000200)
-#define GPIO_CTRL_CFG_GPIOD_BIT2       FIELD32(0x00000400)
-#define GPIO_CTRL_CFG_GPIOD_BIT3       FIELD32(0x00000800)
-#define GPIO_CTRL_CFG_GPIOD_BIT4       FIELD32(0x00001000)
-#define GPIO_CTRL_CFG_GPIOD_BIT5       FIELD32(0x00002000)
-#define GPIO_CTRL_CFG_GPIOD_BIT6       FIELD32(0x00004000)
-#define GPIO_CTRL_CFG_GPIOD_BIT7       FIELD32(0x00008000)
+ * GPIO_CTRL:
+ *     GPIO_CTRL_VALx: GPIO value
+ *     GPIO_CTRL_DIRx: GPIO direction: 0 = output; 1 = input
+ */
+#define GPIO_CTRL                      0x0228
+#define GPIO_CTRL_VAL0                 FIELD32(0x00000001)
+#define GPIO_CTRL_VAL1                 FIELD32(0x00000002)
+#define GPIO_CTRL_VAL2                 FIELD32(0x00000004)
+#define GPIO_CTRL_VAL3                 FIELD32(0x00000008)
+#define GPIO_CTRL_VAL4                 FIELD32(0x00000010)
+#define GPIO_CTRL_VAL5                 FIELD32(0x00000020)
+#define GPIO_CTRL_VAL6                 FIELD32(0x00000040)
+#define GPIO_CTRL_VAL7                 FIELD32(0x00000080)
+#define GPIO_CTRL_DIR0                 FIELD32(0x00000100)
+#define GPIO_CTRL_DIR1                 FIELD32(0x00000200)
+#define GPIO_CTRL_DIR2                 FIELD32(0x00000400)
+#define GPIO_CTRL_DIR3                 FIELD32(0x00000800)
+#define GPIO_CTRL_DIR4                 FIELD32(0x00001000)
+#define GPIO_CTRL_DIR5                 FIELD32(0x00002000)
+#define GPIO_CTRL_DIR6                 FIELD32(0x00004000)
+#define GPIO_CTRL_DIR7                 FIELD32(0x00008000)
+#define GPIO_CTRL_VAL8                 FIELD32(0x00010000)
+#define GPIO_CTRL_VAL9                 FIELD32(0x00020000)
+#define GPIO_CTRL_VAL10                        FIELD32(0x00040000)
+#define GPIO_CTRL_DIR8                 FIELD32(0x01000000)
+#define GPIO_CTRL_DIR9                 FIELD32(0x02000000)
+#define GPIO_CTRL_DIR10                        FIELD32(0x04000000)
 
 /*
  * MCU_CMD_CFG
@@ -1935,6 +1942,11 @@ struct mac_iveiv_entry {
 #define BBP47_TSSI_TSSI_MODE           FIELD8(0x18)
 #define BBP47_TSSI_ADC6                        FIELD8(0x80)
 
+/*
+ * BBP 49
+ */
+#define BBP49_UPDATE_FLAG              FIELD8(0x01)
+
 /*
  * BBP 109
  */
index b93516d832fb5603e4bb3d287a4770c0c8de06ad..540c94f8505a9b734b9b09ef6ab814ca04b17bd0 100644 (file)
@@ -923,8 +923,8 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
                rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
                return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
        } else {
-               rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-               return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+               rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+               return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
        }
 }
 EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1570,10 +1570,10 @@ static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
                rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
                                   eesk_pin, 0);
 
-       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
-       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+       rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
 }
 
 void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
@@ -1615,6 +1615,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        case 1:
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
+                   rt2x00_rt(rt2x00dev, RT3352) ||
                    rt2x00_rt(rt2x00dev, RT3390)) {
                        rt2x00_eeprom_read(rt2x00dev,
                                           EEPROM_NIC_CONF1, &eeprom);
@@ -1762,36 +1763,15 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
 
        rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
+                         rt2x00dev->default_ant.rx_chain_num <= 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD,
+                         rt2x00dev->default_ant.rx_chain_num <= 2);
        rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
-       if (rt2x00_rt(rt2x00dev, RT3390)) {
-               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
-                                 rt2x00dev->default_ant.rx_chain_num == 1);
-               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
-                                 rt2x00dev->default_ant.tx_chain_num == 1);
-       } else {
-               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
-
-               switch (rt2x00dev->default_ant.tx_chain_num) {
-               case 1:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
-                       /* fall through */
-               case 2:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
-                       break;
-               }
-
-               switch (rt2x00dev->default_ant.rx_chain_num) {
-               case 1:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
-                       /* fall through */
-               case 2:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
-                       break;
-               }
-       }
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
+                         rt2x00dev->default_ant.tx_chain_num <= 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD,
+                         rt2x00dev->default_ant.tx_chain_num <= 2);
        rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
 
        rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
@@ -1995,13 +1975,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
                rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
        }
 
-       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0);
+       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
        if (rf->channel <= 14)
-               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1);
+               rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
        else
-               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0);
-       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+               rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
 
        rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
@@ -2053,6 +2033,60 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
        }
 }
 
+static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_conf *conf,
+                                        struct rf_channel *rf,
+                                        struct channel_info *info)
+{
+       u8 rfcsr;
+
+       rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+
+       rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+       rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+       rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+
+       if (info->default_power1 > POWER_BOUND)
+               rt2800_rfcsr_write(rt2x00dev, 47, POWER_BOUND);
+       else
+               rt2800_rfcsr_write(rt2x00dev, 47, info->default_power1);
+
+       if (info->default_power2 > POWER_BOUND)
+               rt2800_rfcsr_write(rt2x00dev, 48, POWER_BOUND);
+       else
+               rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
+
+       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+       if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+
+       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+
+       if (rt2x00dev->default_ant.tx_chain_num == 2)
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+
+       if (rt2x00dev->default_ant.rx_chain_num == 2)
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       rt2800_rfcsr_write(rt2x00dev, 31, 80);
+}
+
 static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
                                         struct ieee80211_conf *conf,
                                         struct rf_channel *rf,
@@ -2182,6 +2216,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        case RF3290:
                rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
                break;
+       case RF3322:
+               rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
+               break;
        case RF5360:
        case RF5370:
        case RF5372:
@@ -2194,6 +2231,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        }
 
        if (rt2x00_rf(rt2x00dev, RF3290) ||
+           rt2x00_rf(rt2x00dev, RF3322) ||
            rt2x00_rf(rt2x00dev, RF5360) ||
            rt2x00_rf(rt2x00dev, RF5370) ||
            rt2x00_rf(rt2x00dev, RF5372) ||
@@ -2212,10 +2250,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        /*
         * Change BBP settings
         */
-       rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
-       rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
-       rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
-       rt2800_bbp_write(rt2x00dev, 86, 0);
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 27, 0x0);
+               rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 27, 0x20);
+               rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 86, 0);
+       }
 
        if (rf->channel <= 14) {
                if (!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -2310,6 +2355,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
        rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
        rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
+
+       /*
+        * Clear update flag
+        */
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_read(rt2x00dev, 49, &bbp);
+               rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
+               rt2800_bbp_write(rt2x00dev, 49, bbp);
+       }
 }
 
 static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
@@ -2821,23 +2875,32 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
 
 static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
 {
+       u8 vgc;
+
        if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3071) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
                    rt2x00_rt(rt2x00dev, RT3290) ||
                    rt2x00_rt(rt2x00dev, RT3390) ||
+                   rt2x00_rt(rt2x00dev, RT3572) ||
                    rt2x00_rt(rt2x00dev, RT5390) ||
                    rt2x00_rt(rt2x00dev, RT5392))
-                       return 0x1c + (2 * rt2x00dev->lna_gain);
+                       vgc = 0x1c + (2 * rt2x00dev->lna_gain);
                else
-                       return 0x2e + rt2x00dev->lna_gain;
+                       vgc = 0x2e + rt2x00dev->lna_gain;
+       } else { /* 5GHZ band */
+               if (rt2x00_rt(rt2x00dev, RT3572))
+                       vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+               else {
+                       if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+                               vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
+                       else
+                               vgc = 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+               }
        }
 
-       if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
-               return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
-       else
-               return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+       return vgc;
 }
 
 static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
@@ -2998,11 +3061,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
+       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
        } else if (rt2x00_rt(rt2x00dev, RT3572)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
        } else if (rt2x00_rt(rt2x00dev, RT5390) ||
-                          rt2x00_rt(rt2x00dev, RT5392)) {
+                  rt2x00_rt(rt2x00dev, RT5392)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3378,6 +3445,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                     rt2800_wait_bbp_ready(rt2x00dev)))
                return -EACCES;
 
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 3, 0x00);
+               rt2800_bbp_write(rt2x00dev, 4, 0x50);
+       }
+
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3388,15 +3460,20 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
 
        if (rt2800_is_305x_soc(rt2x00dev) ||
            rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT3572) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
+       if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
        rt2800_bbp_write(rt2x00dev, 65, 0x2c);
        rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 68, 0x0b);
@@ -3405,6 +3482,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 69, 0x16);
                rt2800_bbp_write(rt2x00dev, 73, 0x12);
        } else if (rt2x00_rt(rt2x00dev, RT3290) ||
+                  rt2x00_rt(rt2x00dev, RT3352) ||
                   rt2x00_rt(rt2x00dev, RT5390) ||
                   rt2x00_rt(rt2x00dev, RT5392)) {
                rt2800_bbp_write(rt2x00dev, 69, 0x12);
@@ -3436,15 +3514,17 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        } else if (rt2800_is_305x_soc(rt2x00dev)) {
                rt2800_bbp_write(rt2x00dev, 78, 0x0e);
                rt2800_bbp_write(rt2x00dev, 80, 0x08);
-       } else {
-               rt2800_bbp_write(rt2x00dev, 81, 0x37);
-       }
-
-       if (rt2x00_rt(rt2x00dev, RT3290)) {
+       } else if (rt2x00_rt(rt2x00dev, RT3290)) {
                rt2800_bbp_write(rt2x00dev, 74, 0x0b);
                rt2800_bbp_write(rt2x00dev, 79, 0x18);
                rt2800_bbp_write(rt2x00dev, 80, 0x09);
                rt2800_bbp_write(rt2x00dev, 81, 0x33);
+       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+               rt2800_bbp_write(rt2x00dev, 80, 0x08);
+               rt2800_bbp_write(rt2x00dev, 81, 0x37);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 81, 0x37);
        }
 
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -3465,18 +3545,21 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 84, 0x99);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 86, 0x38);
        else
                rt2800_bbp_write(rt2x00dev, 86, 0x00);
 
-       if (rt2x00_rt(rt2x00dev, RT5392))
+       if (rt2x00_rt(rt2x00dev, RT3352) ||
+           rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 88, 0x90);
 
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 92, 0x02);
@@ -3493,6 +3576,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
            rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
            rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT3572) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3502,6 +3586,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 103, 0x00);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 104, 0x92);
@@ -3510,6 +3595,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 105, 0x01);
        else if (rt2x00_rt(rt2x00dev, RT3290))
                rt2800_bbp_write(rt2x00dev, 105, 0x1c);
+       else if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 105, 0x34);
        else if (rt2x00_rt(rt2x00dev, RT5390) ||
                         rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 105, 0x3c);
@@ -3519,11 +3606,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390))
                rt2800_bbp_write(rt2x00dev, 106, 0x03);
+       else if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 106, 0x05);
        else if (rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 106, 0x12);
        else
                rt2800_bbp_write(rt2x00dev, 106, 0x35);
 
+       if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
@@ -3534,6 +3626,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 135, 0xf6);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
        if (rt2x00_rt(rt2x00dev, RT3071) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
            rt2x00_rt(rt2x00dev, RT3390) ||
@@ -3574,6 +3669,28 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 3, value);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+               /* Set ITxBF timeout to 0x9c40=1000msec */
+               rt2800_bbp_write(rt2x00dev, 179, 0x02);
+               rt2800_bbp_write(rt2x00dev, 180, 0x00);
+               rt2800_bbp_write(rt2x00dev, 182, 0x40);
+               rt2800_bbp_write(rt2x00dev, 180, 0x01);
+               rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+               rt2800_bbp_write(rt2x00dev, 179, 0x00);
+               /* Reprogram the inband interface to put right values in RXWI */
+               rt2800_bbp_write(rt2x00dev, 142, 0x04);
+               rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+               rt2800_bbp_write(rt2x00dev, 142, 0x06);
+               rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+               rt2800_bbp_write(rt2x00dev, 142, 0x07);
+               rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+               rt2800_bbp_write(rt2x00dev, 142, 0x08);
+               rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+
+               rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+       }
+
        if (rt2x00_rt(rt2x00dev, RT5390) ||
                rt2x00_rt(rt2x00dev, RT5392)) {
                int ant, div_mode;
@@ -3587,16 +3704,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
                        u32 reg;
 
-                       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+                       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
                        if (ant == 0)
-                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
                        else if (ant == 1)
-                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
-                       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
+                       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
                }
 
                /* This chip has hardware antenna diversity*/
@@ -3707,6 +3824,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rt(rt2x00dev, RT3071) &&
            !rt2x00_rt(rt2x00dev, RT3090) &&
            !rt2x00_rt(rt2x00dev, RT3290) &&
+           !rt2x00_rt(rt2x00dev, RT3352) &&
            !rt2x00_rt(rt2x00dev, RT3390) &&
            !rt2x00_rt(rt2x00dev, RT3572) &&
            !rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3903,6 +4021,70 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
                rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
                return 0;
+       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
+               rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
+               rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+               rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
+               rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
+               rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+               rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+               rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
+               rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+               rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
+               rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
+               rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
+               rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+               rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+               rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
+               rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
+               rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
+               rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
+               rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
+               rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
+               rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+               rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
+               rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
+               rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
+               rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
+               rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
+               rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
+               rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
+               rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
+               rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
+               rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
+               rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
+               rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
+               rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
        } else if (rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
                rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
@@ -4104,6 +4286,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                        rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
        } else if (rt2x00_rt(rt2x00dev, RT3071) ||
                   rt2x00_rt(rt2x00dev, RT3090) ||
+                  rt2x00_rt(rt2x00dev, RT3352) ||
                   rt2x00_rt(rt2x00dev, RT3390) ||
                   rt2x00_rt(rt2x00dev, RT3572)) {
                drv_data->calibration_bw20 =
@@ -4392,13 +4575,18 @@ void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
 }
 EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
 
-int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u16 word;
        u8 *mac;
        u8 default_lna_gain;
 
+       /*
+        * Read the EEPROM.
+        */
+       rt2800_read_eeprom(rt2x00dev);
+
        /*
         * Start validation of the data that has been read.
         */
@@ -4521,9 +4709,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
 
-int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        u32 reg;
        u16 value;
@@ -4562,6 +4749,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RT3071:
        case RT3090:
        case RT3290:
+       case RT3352:
        case RT3390:
        case RT3572:
        case RT5390:
@@ -4584,6 +4772,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RF3052:
        case RF3290:
        case RF3320:
+       case RF3322:
        case RF5360:
        case RF5370:
        case RF5372:
@@ -4608,6 +4797,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT3390)) {
                value = rt2x00_get_field16(eeprom,
                                EEPROM_NIC_CONF1_ANT_DIVERSITY);
@@ -4681,7 +4871,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
 
 /*
  * RF value list for rt28xx
@@ -4824,7 +5013,7 @@ static const struct rf_channel rf_vals_3x[] = {
        {173, 0x61, 0, 9},
 };
 
-int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
        struct hw_mode_spec *spec = &rt2x00dev->spec;
        struct channel_info *info;
@@ -4901,6 +5090,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                   rt2x00_rf(rt2x00dev, RF3022) ||
                   rt2x00_rf(rt2x00dev, RF3290) ||
                   rt2x00_rf(rt2x00dev, RF3320) ||
+                  rt2x00_rf(rt2x00dev, RF3322) ||
                   rt2x00_rf(rt2x00dev, RF5360) ||
                   rt2x00_rf(rt2x00dev, RF5370) ||
                   rt2x00_rf(rt2x00dev, RF5372) ||
@@ -5000,7 +5190,72 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
+
+int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+       int retval;
+       u32 reg;
+
+       /*
+        * Allocate eeprom data.
+        */
+       retval = rt2800_validate_eeprom(rt2x00dev);
+       if (retval)
+               return retval;
+
+       retval = rt2800_init_eeprom(rt2x00dev);
+       if (retval)
+               return retval;
+
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
+       /*
+        * Initialize hw specifications.
+        */
+       retval = rt2800_probe_hw_mode(rt2x00dev);
+       if (retval)
+               return retval;
+
+       /*
+        * Set device capabilities.
+        */
+       __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
+       __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
+       if (!rt2x00_is_usb(rt2x00dev))
+               __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
+
+       /*
+        * Set device requirements.
+        */
+       if (!rt2x00_is_soc(rt2x00dev))
+               __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
+       __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
+       __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
+       if (!rt2800_hwcrypt_disabled(rt2x00dev))
+               __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
+       __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
+       __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
+       if (rt2x00_is_usb(rt2x00dev))
+               __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
+       else {
+               __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
+               __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
+       }
+
+       /*
+        * Set the rssi offset.
+        */
+       rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_probe_hw);
 
 /*
  * IEEE80211 stack callback functions.
index 18a0b67b4c68daa23f6759a5ac407e04209bfc2c..a128ceadcb3e733620c37fa03e49666cf80b2e09 100644 (file)
@@ -43,6 +43,9 @@ struct rt2800_ops {
                            const unsigned int offset,
                            const struct rt2x00_field32 field, u32 *reg);
 
+       void (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
+       bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev);
+
        int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
                                  const u8 *data, const size_t len);
        int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
@@ -114,6 +117,20 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
        return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
 }
 
+static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+       const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
+
+       rt2800ops->read_eeprom(rt2x00dev);
+}
+
+static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
+
+       return rt2800ops->hwcrypt_disabled(rt2x00dev);
+}
+
 static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev,
                                            const u8 *data, const size_t len)
 {
@@ -191,9 +208,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
 
 int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
 void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
-int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev);
-int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev);
-int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
+
+int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
 
 void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
                         u16 *iv16);
index 4765bbd654cdcfeea617c84f9c755db05409600d..27829e1e2e38964b2085dc84e30f618b3f72f97e 100644 (file)
@@ -54,6 +54,11 @@ static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
+static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       return modparam_nohwcrypt;
+}
+
 static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
 {
        unsigned int i;
@@ -965,85 +970,14 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
 /*
  * Device probe functions.
  */
-static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
-       /*
-        * Read EEPROM into buffer
-        */
        if (rt2x00_is_soc(rt2x00dev))
                rt2800pci_read_eeprom_soc(rt2x00dev);
        else if (rt2800pci_efuse_detect(rt2x00dev))
                rt2800pci_read_eeprom_efuse(rt2x00dev);
        else
                rt2800pci_read_eeprom_pci(rt2x00dev);
-
-       return rt2800_validate_eeprom(rt2x00dev);
-}
-
-static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
-{
-       int retval;
-       u32 reg;
-
-       /*
-        * Allocate eeprom data.
-        */
-       retval = rt2800pci_validate_eeprom(rt2x00dev);
-       if (retval)
-               return retval;
-
-       retval = rt2800_init_eeprom(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * Enable rfkill polling by setting GPIO direction of the
-        * rfkill switch GPIO pin correctly.
-        */
-       rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
-       rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
-
-       /*
-        * Initialize hw specifications.
-        */
-       retval = rt2800_probe_hw_mode(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * This device has multiple filters for control frames
-        * and has a separate filter for PS Poll frames.
-        */
-       __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
-
-       /*
-        * This device has a pre tbtt interrupt and thus fetches
-        * a new beacon directly prior to transmission.
-        */
-       __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
-
-       /*
-        * This device requires firmware.
-        */
-       if (!rt2x00_is_soc(rt2x00dev))
-               __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
-       if (!modparam_nohwcrypt)
-               __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
-
-       /*
-        * Set the rssi offset.
-        */
-       rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
-
-       return 0;
 }
 
 static const struct ieee80211_ops rt2800pci_mac80211_ops = {
@@ -1081,6 +1015,8 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
        .register_multiread     = rt2x00pci_register_multiread,
        .register_multiwrite    = rt2x00pci_register_multiwrite,
        .regbusy_read           = rt2x00pci_regbusy_read,
+       .read_eeprom            = rt2800pci_read_eeprom,
+       .hwcrypt_disabled       = rt2800pci_hwcrypt_disabled,
        .drv_write_firmware     = rt2800pci_write_firmware,
        .drv_init_registers     = rt2800pci_init_registers,
        .drv_get_txwi           = rt2800pci_get_txwi,
@@ -1093,7 +1029,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .tbtt_tasklet           = rt2800pci_tbtt_tasklet,
        .rxdone_tasklet         = rt2800pci_rxdone_tasklet,
        .autowake_tasklet       = rt2800pci_autowake_tasklet,
-       .probe_hw               = rt2800pci_probe_hw,
+       .probe_hw               = rt2800_probe_hw,
        .get_firmware_name      = rt2800pci_get_firmware_name,
        .check_firmware         = rt2800_check_firmware,
        .load_firmware          = rt2800_load_firmware,
@@ -1152,7 +1088,6 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
 static const struct rt2x00_ops rt2800pci_ops = {
        .name                   = KBUILD_MODNAME,
        .drv_data_size          = sizeof(struct rt2800_drv_data),
-       .max_sta_intf           = 1,
        .max_ap_intf            = 8,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 6b4226b716187ea037d2a1c84e012806649e8816..c9e9370eb789c04ec91fd97a2a85d8d21977a144 100644 (file)
@@ -49,6 +49,11 @@ static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
+static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       return modparam_nohwcrypt;
+}
+
 /*
  * Queue handlers.
  */
@@ -730,73 +735,27 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 /*
  * Device probe functions.
  */
-static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        if (rt2800_efuse_detect(rt2x00dev))
                rt2800_read_eeprom_efuse(rt2x00dev);
        else
                rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
                                      EEPROM_SIZE);
-
-       return rt2800_validate_eeprom(rt2x00dev);
 }
 
 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
-       u32 reg;
 
-       /*
-        * Allocate eeprom data.
-        */
-       retval = rt2800usb_validate_eeprom(rt2x00dev);
+       retval = rt2800_probe_hw(rt2x00dev);
        if (retval)
                return retval;
 
-       retval = rt2800_init_eeprom(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * Enable rfkill polling by setting GPIO direction of the
-        * rfkill switch GPIO pin correctly.
-        */
-       rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
-       rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
-
-       /*
-        * Initialize hw specifications.
-        */
-       retval = rt2800_probe_hw_mode(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * This device has multiple filters for control frames
-        * and has a separate filter for PS Poll frames.
-        */
-       __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
-
-       /*
-        * This device requires firmware.
-        */
-       __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
-       if (!modparam_nohwcrypt)
-               __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
-
-       rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout,
-
        /*
-        * Set the rssi offset.
+        * Set txstatus timer function.
         */
-       rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+       rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
 
        /*
         * Overwrite TX done handler
@@ -842,6 +801,8 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
        .register_multiread     = rt2x00usb_register_multiread,
        .register_multiwrite    = rt2x00usb_register_multiwrite,
        .regbusy_read           = rt2x00usb_regbusy_read,
+       .read_eeprom            = rt2800usb_read_eeprom,
+       .hwcrypt_disabled       = rt2800usb_hwcrypt_disabled,
        .drv_write_firmware     = rt2800usb_write_firmware,
        .drv_init_registers     = rt2800usb_init_registers,
        .drv_get_txwi           = rt2800usb_get_txwi,
@@ -909,7 +870,6 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
 static const struct rt2x00_ops rt2800usb_ops = {
        .name                   = KBUILD_MODNAME,
        .drv_data_size          = sizeof(struct rt2800_drv_data),
-       .max_sta_intf           = 1,
        .max_ap_intf            = 8,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 8afb546c2b2d3b1741be167969e7c04dcefb0239..0751b35ef6dcd536ba51c3554dc4b58e359a9628 100644 (file)
@@ -188,6 +188,7 @@ struct rt2x00_chip {
 #define RT3071         0x3071
 #define RT3090         0x3090  /* 2.4GHz PCIe */
 #define RT3290         0x3290
+#define RT3352         0x3352  /* WSOC */
 #define RT3390         0x3390
 #define RT3572         0x3572
 #define RT3593         0x3593
@@ -655,7 +656,6 @@ struct rt2x00lib_ops {
 struct rt2x00_ops {
        const char *name;
        const unsigned int drv_data_size;
-       const unsigned int max_sta_intf;
        const unsigned int max_ap_intf;
        const unsigned int eeprom_size;
        const unsigned int rf_size;
@@ -740,6 +740,14 @@ enum rt2x00_capability_flags {
        CAPABILITY_VCO_RECALIBRATION,
 };
 
+/*
+ * Interface combinations
+ */
+enum {
+       IF_COMB_AP = 0,
+       NUM_IF_COMB,
+};
+
 /*
  * rt2x00 device structure.
  */
@@ -866,6 +874,12 @@ struct rt2x00_dev {
        unsigned int intf_associated;
        unsigned int intf_beaconing;
 
+       /*
+        * Interface combinations
+        */
+       struct ieee80211_iface_limit if_limits_ap;
+       struct ieee80211_iface_combination if_combinations[NUM_IF_COMB];
+
        /*
         * Link quality
         */
@@ -1287,7 +1301,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
 /*
  * mac80211 handlers.
  */
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb);
 int rt2x00mac_start(struct ieee80211_hw *hw);
 void rt2x00mac_stop(struct ieee80211_hw *hw);
 int rt2x00mac_add_interface(struct ieee80211_hw *hw,
index 3f07e36f462b384565884580170faf7c3be2f25f..69097d1faeb676d97ddd27c7ba7dc3f575cd2f6f 100644 (file)
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
         */
        skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
        while (skb) {
-               rt2x00mac_tx(rt2x00dev->hw, skb);
+               rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
                skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
        }
 }
@@ -1118,6 +1118,34 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->intf_associated = 0;
 }
 
+static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
+{
+       struct ieee80211_iface_limit *if_limit;
+       struct ieee80211_iface_combination *if_combination;
+
+       /*
+        * Build up AP interface limits structure.
+        */
+       if_limit = &rt2x00dev->if_limits_ap;
+       if_limit->max = rt2x00dev->ops->max_ap_intf;
+       if_limit->types = BIT(NL80211_IFTYPE_AP);
+
+       /*
+        * Build up AP interface combinations structure.
+        */
+       if_combination = &rt2x00dev->if_combinations[IF_COMB_AP];
+       if_combination->limits = if_limit;
+       if_combination->n_limits = 1;
+       if_combination->max_interfaces = if_limit->max;
+       if_combination->num_different_channels = 1;
+
+       /*
+        * Finally, specify the possible combinations to mac80211.
+        */
+       rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations;
+       rt2x00dev->hw->wiphy->n_iface_combinations = 1;
+}
+
 /*
  * driver allocation handlers.
  */
@@ -1125,6 +1153,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 {
        int retval = -ENOMEM;
 
+       /*
+        * Set possible interface combinations.
+        */
+       rt2x00lib_set_if_combinations(rt2x00dev);
+
        /*
         * Allocate the driver data memory, if necessary.
         */
index 4ff26c2159bf4b25178fbb66a0cd9794651ac185..98a9e48f8e4a38e852c54e8dbdef0c2369a753a4 100644 (file)
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -212,46 +214,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
            !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
                return -ENODEV;
 
-       switch (vif->type) {
-       case NL80211_IFTYPE_AP:
-               /*
-                * We don't support mixed combinations of
-                * sta and ap interfaces.
-                */
-               if (rt2x00dev->intf_sta_count)
-                       return -ENOBUFS;
-
-               /*
-                * Check if we exceeded the maximum amount
-                * of supported interfaces.
-                */
-               if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
-                       return -ENOBUFS;
-
-               break;
-       case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
-       case NL80211_IFTYPE_WDS:
-               /*
-                * We don't support mixed combinations of
-                * sta and ap interfaces.
-                */
-               if (rt2x00dev->intf_ap_count)
-                       return -ENOBUFS;
-
-               /*
-                * Check if we exceeded the maximum amount
-                * of supported interfaces.
-                */
-               if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
-                       return -ENOBUFS;
-
-               break;
-       default:
-               return -EINVAL;
-       }
-
        /*
         * Loop through all beacon queues to find a free
         * entry. Since there are as much beacon entries
index f7e74a0a775911abab23dab3b0a512fb0524d0a4..e488b944a0340834ed96c02c91df59e9b3f5e142 100644 (file)
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
 static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                                                struct sk_buff *skb,
                                                struct txentry_desc *txdesc,
+                                               struct ieee80211_sta *sta,
                                                const struct rt2x00_rate *hwrate)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_sta *sta_priv = NULL;
 
-       if (tx_info->control.sta) {
+       if (sta) {
                txdesc->u.ht.mpdu_density =
-                   tx_info->control.sta->ht_cap.ampdu_density;
+                   sta->ht_cap.ampdu_density;
 
-               sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
+               sta_priv = sta_to_rt2x00_sta(sta);
                txdesc->u.ht.wcid = sta_priv->wcid;
        }
 
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                 * MIMO PS should be set to 1 for STA's using dynamic SM PS
                 * when using more then one tx stream (>MCS7).
                 */
-               if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
-                   ((tx_info->control.sta->ht_cap.cap &
+               if (sta && txdesc->u.ht.mcs > 7 &&
+                   ((sta->ht_cap.cap &
                      IEEE80211_HT_CAP_SM_PS) >>
                     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
                    WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 
 static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
                                             struct sk_buff *skb,
-                                            struct txentry_desc *txdesc)
+                                            struct txentry_desc *txdesc,
+                                            struct ieee80211_sta *sta)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 
        if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
                rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
-                                                   hwrate);
+                                                  sta, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
                                                      hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
         * after that we are free to use the skb->cb array
         * for our information.
         */
-       rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
+       rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
 
        /*
         * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
         * after that we are free to use the skb->cb array
         * for our information.
         */
-       rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
+       rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 
        /*
         * Fill in skb descriptor
index b8ec96163922a11711a3d6800b9556052c1386fc..d6582a2fa3534879614a23dd29435e747a0cca0c 100644 (file)
@@ -243,7 +243,7 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
-       return rt2x00_get_field32(reg, MAC_CSR13_BIT5);
+       return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -715,11 +715,11 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
 
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT4, p1);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT12, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
 
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT3, !p2);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT11, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2);
 
        rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
 }
@@ -2855,7 +2855,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
        rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
 
        /*
@@ -3045,7 +3045,6 @@ static const struct data_queue_desc rt61pci_queue_bcn = {
 
 static const struct rt2x00_ops rt61pci_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 4,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 8f3da5a56766f4c3293825c2d649078f4cb5455f..9bc6b6044e34189e6a069a4168a7eaec53565a00 100644 (file)
@@ -357,22 +357,22 @@ struct hw_pairwise_ta_entry {
 
 /*
  * MAC_CSR13: GPIO.
+ *     MAC_CSR13_VALx: GPIO value
+ *     MAC_CSR13_DIRx: GPIO direction: 0 = output; 1 = input
  */
 #define MAC_CSR13                      0x3034
-#define MAC_CSR13_BIT0                 FIELD32(0x00000001)
-#define MAC_CSR13_BIT1                 FIELD32(0x00000002)
-#define MAC_CSR13_BIT2                 FIELD32(0x00000004)
-#define MAC_CSR13_BIT3                 FIELD32(0x00000008)
-#define MAC_CSR13_BIT4                 FIELD32(0x00000010)
-#define MAC_CSR13_BIT5                 FIELD32(0x00000020)
-#define MAC_CSR13_BIT6                 FIELD32(0x00000040)
-#define MAC_CSR13_BIT7                 FIELD32(0x00000080)
-#define MAC_CSR13_BIT8                 FIELD32(0x00000100)
-#define MAC_CSR13_BIT9                 FIELD32(0x00000200)
-#define MAC_CSR13_BIT10                        FIELD32(0x00000400)
-#define MAC_CSR13_BIT11                        FIELD32(0x00000800)
-#define MAC_CSR13_BIT12                        FIELD32(0x00001000)
-#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
+#define MAC_CSR13_VAL0                 FIELD32(0x00000001)
+#define MAC_CSR13_VAL1                 FIELD32(0x00000002)
+#define MAC_CSR13_VAL2                 FIELD32(0x00000004)
+#define MAC_CSR13_VAL3                 FIELD32(0x00000008)
+#define MAC_CSR13_VAL4                 FIELD32(0x00000010)
+#define MAC_CSR13_VAL5                 FIELD32(0x00000020)
+#define MAC_CSR13_DIR0                 FIELD32(0x00000100)
+#define MAC_CSR13_DIR1                 FIELD32(0x00000200)
+#define MAC_CSR13_DIR2                 FIELD32(0x00000400)
+#define MAC_CSR13_DIR3                 FIELD32(0x00000800)
+#define MAC_CSR13_DIR4                 FIELD32(0x00001000)
+#define MAC_CSR13_DIR5                 FIELD32(0x00002000)
 
 /*
  * MAC_CSR14: LED control register.
index 248436c13ce04ae1f79312c6cbb1e16d8a4b5fc9..e5eb43b3eee74f94db85349436f0ba1e15925059 100644 (file)
@@ -189,7 +189,7 @@ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
-       return rt2x00_get_field32(reg, MAC_CSR13_BIT7);
+       return rt2x00_get_field32(reg, MAC_CSR13_VAL7);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2195,7 +2195,7 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0);
        rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
 
        /*
@@ -2382,7 +2382,6 @@ static const struct data_queue_desc rt73usb_queue_bcn = {
 
 static const struct rt2x00_ops rt73usb_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 4,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index df1cc116b83be891ee2ff20702260f5949d3d983..7577e0ba3877363cb63480b42c4e118f599de7ef 100644 (file)
@@ -267,24 +267,26 @@ struct hw_pairwise_ta_entry {
 
 /*
  * MAC_CSR13: GPIO.
+ *     MAC_CSR13_VALx: GPIO value
+ *     MAC_CSR13_DIRx: GPIO direction: 0 = input; 1 = output
  */
 #define MAC_CSR13                      0x3034
-#define MAC_CSR13_BIT0                 FIELD32(0x00000001)
-#define MAC_CSR13_BIT1                 FIELD32(0x00000002)
-#define MAC_CSR13_BIT2                 FIELD32(0x00000004)
-#define MAC_CSR13_BIT3                 FIELD32(0x00000008)
-#define MAC_CSR13_BIT4                 FIELD32(0x00000010)
-#define MAC_CSR13_BIT5                 FIELD32(0x00000020)
-#define MAC_CSR13_BIT6                 FIELD32(0x00000040)
-#define MAC_CSR13_BIT7                 FIELD32(0x00000080)
-#define MAC_CSR13_BIT8                 FIELD32(0x00000100)
-#define MAC_CSR13_BIT9                 FIELD32(0x00000200)
-#define MAC_CSR13_BIT10                        FIELD32(0x00000400)
-#define MAC_CSR13_BIT11                        FIELD32(0x00000800)
-#define MAC_CSR13_BIT12                        FIELD32(0x00001000)
-#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
-#define MAC_CSR13_BIT14                        FIELD32(0x00004000)
-#define MAC_CSR13_BIT15                        FIELD32(0x00008000)
+#define MAC_CSR13_VAL0                 FIELD32(0x00000001)
+#define MAC_CSR13_VAL1                 FIELD32(0x00000002)
+#define MAC_CSR13_VAL2                 FIELD32(0x00000004)
+#define MAC_CSR13_VAL3                 FIELD32(0x00000008)
+#define MAC_CSR13_VAL4                 FIELD32(0x00000010)
+#define MAC_CSR13_VAL5                 FIELD32(0x00000020)
+#define MAC_CSR13_VAL6                 FIELD32(0x00000040)
+#define MAC_CSR13_VAL7                 FIELD32(0x00000080)
+#define MAC_CSR13_DIR0                 FIELD32(0x00000100)
+#define MAC_CSR13_DIR1                 FIELD32(0x00000200)
+#define MAC_CSR13_DIR2                 FIELD32(0x00000400)
+#define MAC_CSR13_DIR3                 FIELD32(0x00000800)
+#define MAC_CSR13_DIR4                 FIELD32(0x00001000)
+#define MAC_CSR13_DIR5                 FIELD32(0x00002000)
+#define MAC_CSR13_DIR6                 FIELD32(0x00004000)
+#define MAC_CSR13_DIR7                 FIELD32(0x00008000)
 
 /*
  * MAC_CSR14: LED control register.
index aceaf689f73704d5eba60e2d17478ee620999392..021d83e1b1d3367d0ff19954324c4306dc6b8f42 100644 (file)
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       rtl8180_tx(dev, skb);
+       rtl8180_tx(dev, NULL, skb);
 
 resched:
        /*
index 533024095c43ad48871868d8522c6953399eb86a..7811b6315973cd466e19d2fd2c139532daf04610 100644 (file)
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
        }
 }
 
-static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct rtl8187_priv *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       rtl8187_tx(dev, skb);
+       rtl8187_tx(dev, NULL, skb);
 
 resched:
        /*
index cefac6a43601e17ca6ca9ef6db36589e7d055daf..6b28e92d1d215c0f598354326b9c1e9780376268 100644 (file)
@@ -1,6 +1,6 @@
 config RTL8192CE
        tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
-       depends on MAC80211 && PCI && EXPERIMENTAL
+       depends on MAC80211 && PCI
        select FW_LOADER
        select RTLWIFI
        select RTL8192C_COMMON
@@ -12,7 +12,7 @@ config RTL8192CE
 
 config RTL8192SE
        tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
-       depends on MAC80211 && EXPERIMENTAL && PCI
+       depends on MAC80211 && PCI
        select FW_LOADER
        select RTLWIFI
        ---help---
@@ -23,7 +23,7 @@ config RTL8192SE
 
 config RTL8192DE
        tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
-       depends on MAC80211 && EXPERIMENTAL && PCI
+       depends on MAC80211 && PCI
        select FW_LOADER
        select RTLWIFI
        ---help---
@@ -34,7 +34,7 @@ config RTL8192DE
 
 config RTL8192CU
        tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
-       depends on MAC80211 && USB && EXPERIMENTAL
+       depends on MAC80211 && USB
        select FW_LOADER
        select RTLWIFI
        select RTL8192C_COMMON
index 942e56b77b6030856ee6c6c19055512c8c3552de..59381fe8ed064064dcddaf697ba26aa345b2c0de 100644 (file)
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
                rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
 
                info->control.rates[0].idx = 0;
-               info->control.sta = sta;
                info->band = hw->conf.channel->band;
-               rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+               rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
        }
 err_free:
        return 0;
index a18ad2a989381bc363aa782b514c913f1f8c5b1a..a7c0e52869ba3c708cf39685c59489122353b4a0 100644 (file)
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_unlock(&rtlpriv->locks.conf_mutex);
 }
 
-static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
                goto err_free;
 
-       if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
-               rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+       if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+               rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
 
        return;
 
index 5983631a1b1a750c631a688f896e17043eb14fd1..abc306b502ac0348b97dd40eae26c01ebf6947d9 100644 (file)
@@ -502,7 +502,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
                                _rtl_update_earlymode_info(hw, skb,
                                                           &tcb_desc, tid);
 
-                       rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+                       rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
                }
        }
 }
@@ -927,7 +927,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        info = IEEE80211_SKB_CB(pskb);
        pdesc = &ring->desc[0];
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
-               info, pskb, BEACON_QUEUE, &tcb_desc);
+               info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
 
        __skb_queue_tail(&ring->queue, pskb);
 
@@ -1303,11 +1303,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
 }
 
 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct rtl_sta_info *sta_entry = NULL;
        u8 tid = rtl_get_tid(skb);
 
@@ -1335,13 +1334,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
        return true;
 }
 
-static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-               struct rtl_tcb_desc *ptcb_desc)
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_sta *sta,
+                     struct sk_buff *skb,
+                     struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_sta_info *sta_entry = NULL;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct rtl8192_tx_ring *ring;
        struct rtl_tx_desc *pdesc;
        u8 idx;
@@ -1416,7 +1416,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
-                       info, skb, hw_queue, ptcb_desc);
+                       info, sta, skb, hw_queue, ptcb_desc);
 
        __skb_queue_tail(&ring->queue, skb);
 
index a45afda8259c1fbf4f550b08d9af6904932e3675..1ca4e25c143b83026c43fcccc0ddc6723f900605 100644 (file)
@@ -167,7 +167,7 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
        dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
        dm_digtable->cur_igvalue = 0x20;
        dm_digtable->pre_igvalue = 0x0;
-       dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+       dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
        dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
        dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
        dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -190,7 +190,7 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
        long rssi_val_min = 0;
 
        if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
-           (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) {
+           (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
                if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
                        rssi_val_min =
                            (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -199,8 +199,8 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
                            rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
                else
                        rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-       } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT ||
-                  dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+       } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
+                  dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
                rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
        } else if (dm_digtable->curmultista_connectstate ==
                   DIG_MULTISTA_CONNECT) {
@@ -334,7 +334,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
                multi_sta = true;
 
        if (!multi_sta ||
-           dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+           dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
                initialized = false;
                dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
                return;
@@ -378,15 +378,15 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
        struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
        RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                "presta_connectstate = %x, cursta_connectctate = %x\n",
+                "presta_connectstate = %x, cursta_connectstate = %x\n",
                 dm_digtable->presta_connectstate,
-                dm_digtable->cursta_connectctate);
+                dm_digtable->cursta_connectstate);
 
-       if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate
-           || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT
-           || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+       if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
+           || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
+           || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
 
-               if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+               if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
                        dm_digtable->rssi_val_min =
                            rtl92c_dm_initial_gain_min_pwdb(hw);
                        rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -407,7 +407,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
-       if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+       if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
                dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
 
                if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +484,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
                return;
 
        if (mac->link_state >= MAC80211_LINKED)
-               dm_digtable->cursta_connectctate = DIG_STA_CONNECT;
+               dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
        else
-               dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+               dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
 
        rtl92c_dm_initial_gain_sta(hw);
        rtl92c_dm_initial_gain_multi_sta(hw);
        rtl92c_dm_cck_packet_detection_thresh(hw);
 
-       dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate;
+       dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
 
 }
 
@@ -1214,18 +1214,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
                                 "PreState = %d, CurState = %d\n",
                                 p_ra->pre_ratr_state, p_ra->ratr_state);
 
-                       /* Only the PCI card uses sta in the update rate table
-                        * callback routine */
-                       if (rtlhal->interface == INTF_PCI) {
-                               rcu_read_lock();
-                               sta = ieee80211_find_sta(mac->vif, mac->bssid);
-                       }
+                       rcu_read_lock();
+                       sta = ieee80211_find_sta(mac->vif, mac->bssid);
                        rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
                                        p_ra->ratr_state);
 
                        p_ra->pre_ratr_state = p_ra->ratr_state;
-                       if (rtlhal->interface == INTF_PCI)
-                               rcu_read_unlock();
+                       rcu_read_unlock();
                }
        }
 }
index 8a7b864faca30cc7bfe7cd7462b959be285a609e..883f23ae95194f66fc416853a2a027a4a8a6f9a1 100644 (file)
@@ -577,8 +577,7 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
        ring = &rtlpci->tx_ring[BEACON_QUEUE];
 
        pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
-               kfree_skb(pskb);
+       kfree_skb(pskb);
 
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
index dd4bb0950a575ddd37c31489f10d2bacdaaab940..86d73b32d9956c81f9b43886805bc23cb4adaa10 100644 (file)
@@ -1914,8 +1914,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
        }
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 "ratr_bitmap :%x\n", ratr_bitmap);
-       *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
-                                    (ratr_index << 28));
+       *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+                                    (ratr_index << 28);
        rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
index 7d8f96405f42068bafa9c08d7ec6a382f2e2c7f0..ea2e1bd847c847d83b000f808b7cc1d60c99b7d6 100644 (file)
@@ -344,7 +344,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
        .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
 };
 
-DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
index 52166640f1679897480bb02ebebf33251002e051..390d6d4fcaa027654e82ceea0892933751383be6 100644 (file)
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
 
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       struct ieee80211_sta *sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index c4adb97773659b5353b8d1d724e6f3cfa3ff0e8f..a7cdd514cb2e2bfd6a86aa629a997a3d1c5f5db8 100644 (file)
@@ -713,6 +713,7 @@ struct rx_desc_92c {
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
index 2e6eb356a93ed3151ece64055875d1e292d9e372..6e66f04c363fb43ad25557e5149cab6a78a6d933 100644 (file)
@@ -491,12 +491,14 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
        SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
        for (index = 0; index < 16; index++)
                checksum = checksum ^ (*(ptr + index));
-       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
 }
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 queue_index,
                          struct rtl_tcb_desc *tcb_desc)
 {
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       struct ieee80211_sta *sta = info->control.sta = info->control.sta;
        u8 *qc = ieee80211_get_qos_ctl(hdr);
        u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        u16 seq_number;
index 332b06e78b00d8e9fb849e03709fcb0b412849b0..725c53accc5839bbd30af97e753ab6b8d040ee48 100644 (file)
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
                                           struct sk_buff_head *);
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 queue_index,
                          struct rtl_tcb_desc *tcb_desc);
 void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
index c0201ed69dd75737b2be2e3c65717bdec90232ae..ed868c396c257d5a5b4a22affcc4c12e401b0d0c 100644 (file)
@@ -164,7 +164,7 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
        de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
        de_digtable->cur_igvalue = 0x20;
        de_digtable->pre_igvalue = 0x0;
-       de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+       de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
        de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
        de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
        de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -310,7 +310,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
        struct dig_t *de_digtable = &rtlpriv->dm_digtable;
        unsigned long flag = 0;
 
-       if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+       if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
                if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
                        if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
                                de_digtable->cur_cck_pd_state =
@@ -342,7 +342,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
                de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
        }
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
-                de_digtable->cursta_connectctate == DIG_STA_CONNECT ?
+                de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
                 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
                 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -428,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
        /* Decide the current status and if modify initial gain or not */
        if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
-               de_digtable->cursta_connectctate = DIG_STA_CONNECT;
+               de_digtable->cursta_connectstate = DIG_STA_CONNECT;
        else
-               de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+               de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
 
        /* adjust initial gain according to false alarm counter */
        if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
index eb22dccc418bb3ac50c4d9513cf671517ce8970f..23177076b97f9795b7ba2c57581f49924823fc3e 100644 (file)
@@ -570,8 +570,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
 
        ring = &rtlpci->tx_ring[BEACON_QUEUE];
        pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
-               kfree_skb(pskb);
+       kfree_skb(pskb);
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
        pdesc = &ring->desc[idx];
        /* discard output from call below */
index 442031256bceeda3df1bc56a45fbdcb9edd6aff3..db0086062d0574f59018739a8ac3f198c3a4aa34 100644 (file)
@@ -1314,7 +1314,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
        struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
 
        RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
-       /*----Restore RFENV control type----*/ ;
+       /*----Restore RFENV control type----*/
        switch (rfpath) {
        case RF90_PATH_A:
        case RF90_PATH_C:
index f80690d82c117ab430b25caac22170be63a32e9a..4686f340b9d6095698c0e8b2f3a16f3bd2ae0fdb 100644 (file)
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index 057a52431b0036a9b953b9628397a851cb913995..c1b5dfb79d53ce2d2ebf843db65c14999d92196c 100644 (file)
@@ -730,6 +730,7 @@ struct rx_desc_92d {
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
index 36d1cb3aef8a7d5a76e019ab4671f140c38b6255..e3cf4c02122a9baaad488c4e02aaf23fcb33f21f 100644 (file)
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
                struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-               struct ieee80211_tx_info *info, struct sk_buff *skb,
+               struct ieee80211_tx_info *info,
+               struct ieee80211_sta *sta,
+               struct sk_buff *skb,
                u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
@@ -755,7 +756,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
        SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
 
        /* DOWRD 8 */
-       SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+       SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
 
        RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
 }
@@ -785,7 +786,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
                /* 92SE need not to set TX packet size when firmware download */
                SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
                SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
-               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
 
                wmb();
                SET_TX_DESC_OWN(pdesc, 1);
@@ -804,7 +805,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
                SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
 
                SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
-               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
 
                wmb();
                SET_TX_DESC_OWN(pdesc, 1);
index 011e7b0695f24f0a00bdd322a82e961cb69faa28..64dd66f287c182a25949126d303925124a24bd71 100644 (file)
@@ -31,6 +31,7 @@
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
index aa970fc18a2176e736758d7467a93e577b835b92..030beb45d8b0b2b30669e945eb1ccc2e2cbd5fd7 100644 (file)
@@ -120,7 +120,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
 
        if (status < 0 && count++ < 4)
                pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
-                      value, status, le32_to_cpu(*(u32 *)pdata));
+                      value, status, *(u32 *)pdata);
        return status;
 }
 
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        _rtl_submit_tx_urb(hw, _urb);
 }
 
-static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
-                           u16 hw_queue)
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  struct sk_buff *skb,
+                                  u16 hw_queue)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
                seq_number += 1;
                seq_number <<= 4;
        }
-       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
                                        hw_queue, &tcb_desc);
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 }
 
-static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+static int rtl_usb_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_sta *sta,
+                     struct sk_buff *skb,
                      struct rtl_tcb_desc *dummy)
 {
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (unlikely(is_hal_stop(rtlhal)))
                goto err_free;
        hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
-       _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+       _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
        _rtl_usb_transmit(hw, skb, hw_queue);
        return NETDEV_TX_OK;
 
@@ -923,6 +927,7 @@ err_free:
 }
 
 static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
        return false;
index cdaa21f297108fe1ac4306d001843965029ac514..f1b6bc693b0a28ddddfd89ef50ffa898f3ce24f7 100644 (file)
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
        EEPROM_BOOT_EFUSE,
 };
 
-enum rtl_status {
+enum ttl_status {
        RTL_STATUS_INTERFACE_START = 0,
 };
 
@@ -135,7 +135,7 @@ enum hardware_type {
        HARDWARE_TYPE_RTL8192CU,
        HARDWARE_TYPE_RTL8192DE,
        HARDWARE_TYPE_RTL8192DU,
-       HARDWARE_TYPE_RTL8723E,
+       HARDWARE_TYPE_RTL8723AE,
        HARDWARE_TYPE_RTL8723U,
 
        /* keep it last */
@@ -389,6 +389,7 @@ enum rt_enc_alg {
        RSERVED_ENCRYPTION = 3,
        AESCCMP_ENCRYPTION = 4,
        WEP104_ENCRYPTION = 5,
+       AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
 };
 
 enum rtl_hal_state {
@@ -873,6 +874,7 @@ struct rtl_phy {
        u32 adda_backup[16];
        u32 iqk_mac_backup[IQK_MAC_REG_NUM];
        u32 iqk_bb_backup[10];
+       bool iqk_initialized;
 
        /* Dual mac */
        bool need_iqk;
@@ -910,6 +912,8 @@ struct rtl_phy {
 #define RTL_AGG_OPERATIONAL                    3
 #define RTL_AGG_OFF                            0
 #define RTL_AGG_ON                             1
+#define RTL_RX_AGG_START                       1
+#define RTL_RX_AGG_STOP                                0
 #define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA                2
 #define RTL_AGG_EMPTYING_HW_QUEUE_DELBA                3
 
@@ -920,6 +924,7 @@ struct rtl_ht_agg {
        u64 bitmap;
        u32 rate_n_flags;
        u8 agg_state;
+       u8 rx_agg_state;
 };
 
 struct rtl_tid_data {
@@ -927,11 +932,19 @@ struct rtl_tid_data {
        struct rtl_ht_agg agg;
 };
 
+struct rssi_sta {
+       long undecorated_smoothed_pwdb;
+};
+
 struct rtl_sta_info {
+       struct list_head list;
        u8 ratr_index;
        u8 wireless_mode;
        u8 mimo_ps;
        struct rtl_tid_data tids[MAX_TID_COUNT];
+
+       /* just used for ap adhoc or mesh*/
+       struct rssi_sta rssi_stat;
 } __packed;
 
 struct rtl_priv;
@@ -1034,6 +1047,11 @@ struct rtl_mac {
 struct rtl_hal {
        struct ieee80211_hw *hw;
 
+       bool up_first_time;
+       bool first_init;
+       bool being_init_adapter;
+       bool bbrf_ready;
+
        enum intf_type interface;
        u16 hw_type;            /*92c or 92d or 92s and so on */
        u8 ic_class;
@@ -1048,6 +1066,7 @@ struct rtl_hal {
        u16 fw_subversion;
        bool h2c_setinprogress;
        u8 last_hmeboxnum;
+       bool fw_ready;
        /*Reserve page start offset except beacon in TxQ. */
        u8 fw_rsvdpage_startoffset;
        u8 h2c_txcmd_seq;
@@ -1083,6 +1102,8 @@ struct rtl_hal {
        bool load_imrandiqk_setting_for2g;
 
        bool disable_amsdu_8k;
+       bool master_of_dmsp;
+       bool slave_of_dmsp;
 };
 
 struct rtl_security {
@@ -1144,6 +1165,9 @@ struct rtl_dm {
        bool disable_tx_int;
        char ofdm_index[2];
        char cck_index;
+
+       /* DMSP */
+       bool supp_phymode_switch;
 };
 
 #define        EFUSE_MAX_LOGICAL_SIZE                  256
@@ -1337,6 +1361,10 @@ struct rtl_stats {
 };
 
 struct rt_link_detect {
+       /* count for roaming */
+       u32 bcn_rx_inperiod;
+       u32 roam_times;
+
        u32 num_tx_in4period[4];
        u32 num_rx_in4period[4];
 
@@ -1344,6 +1372,8 @@ struct rt_link_detect {
        u32 num_rx_inperiod;
 
        bool busytraffic;
+       bool tx_busy_traffic;
+       bool rx_busy_traffic;
        bool higher_busytraffic;
        bool higher_busyrxtraffic;
 
@@ -1418,6 +1448,7 @@ struct rtl_hal_ops {
        void (*fill_tx_desc) (struct ieee80211_hw *hw,
                              struct ieee80211_hdr *hdr, u8 *pdesc_tx,
                              struct ieee80211_tx_info *info,
+                             struct ieee80211_sta *sta,
                              struct sk_buff *skb, u8 hw_queue,
                              struct rtl_tcb_desc *ptcb_desc);
        void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1454,7 +1485,12 @@ struct rtl_hal_ops {
                          u32 regaddr, u32 bitmask);
        void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data);
+       void (*allow_all_destaddr)(struct ieee80211_hw *hw,
+               bool allow_all_da, bool write_into_reg);
        void (*linked_set_reg) (struct ieee80211_hw *hw);
+       void (*check_switch_to_dmdp) (struct ieee80211_hw *hw);
+       void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
+       void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
        bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
        void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
                                            u8 *powerlevel);
@@ -1474,12 +1510,18 @@ struct rtl_intf_ops {
        void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
        int (*adapter_start) (struct ieee80211_hw *hw);
        void (*adapter_stop) (struct ieee80211_hw *hw);
+       bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+                                struct rtl_priv **buddy_priv);
 
-       int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb,
-                       struct rtl_tcb_desc *ptcb_desc);
+       int (*adapter_tx) (struct ieee80211_hw *hw,
+                          struct ieee80211_sta *sta,
+                          struct sk_buff *skb,
+                          struct rtl_tcb_desc *ptcb_desc);
        void (*flush)(struct ieee80211_hw *hw, bool drop);
        int (*reset_trx_ring) (struct ieee80211_hw *hw);
-       bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+       bool (*waitq_insert) (struct ieee80211_hw *hw,
+                             struct ieee80211_sta *sta,
+                             struct sk_buff *skb);
 
        /*pci */
        void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1554,11 +1596,16 @@ struct rtl_locks {
        spinlock_t h2c_lock;
        spinlock_t rf_ps_lock;
        spinlock_t rf_lock;
+       spinlock_t lps_lock;
        spinlock_t waitq_lock;
+       spinlock_t entry_list_lock;
        spinlock_t usb_lock;
 
        /*Dual mac*/
        spinlock_t cck_and_rw_pagea_lock;
+
+       /*Easy concurrent*/
+       spinlock_t check_sendpkt_lock;
 };
 
 struct rtl_works {
@@ -1566,6 +1613,7 @@ struct rtl_works {
 
        /*timer */
        struct timer_list watchdog_timer;
+       struct timer_list dualmac_easyconcurrent_retrytimer;
 
        /*task */
        struct tasklet_struct irq_tasklet;
@@ -1593,6 +1641,31 @@ struct rtl_debug {
        char proc_name[20];
 };
 
+#define MIMO_PS_STATIC                 0
+#define MIMO_PS_DYNAMIC                        1
+#define MIMO_PS_NOLIMIT                        3
+
+struct rtl_dualmac_easy_concurrent_ctl {
+       enum band_type currentbandtype_backfordmdp;
+       bool close_bbandrf_for_dmsp;
+       bool change_to_dmdp;
+       bool change_to_dmsp;
+       bool switch_in_process;
+};
+
+struct rtl_dmsp_ctl {
+       bool activescan_for_slaveofdmsp;
+       bool scan_for_anothermac_fordmsp;
+       bool scan_for_itself_fordmsp;
+       bool writedig_for_anothermacofdmsp;
+       u32 curdigvalue_for_anothermacofdmsp;
+       bool changecckpdstate_for_anothermacofdmsp;
+       u8 curcckpdstate_for_anothermacofdmsp;
+       bool changetxhighpowerlvl_for_anothermacofdmsp;
+       u8 curtxhighlvl_for_anothermacofdmsp;
+       long rssivalmin_for_anothermacofdmsp;
+};
+
 struct ps_t {
        u8 pre_ccastate;
        u8 cur_ccasate;
@@ -1619,7 +1692,7 @@ struct dig_t {
        u8 dig_twoport_algorithm;
        u8 dig_dbgmode;
        u8 dig_slgorithm_switch;
-       u8 cursta_connectctate;
+       u8 cursta_connectstate;
        u8 presta_connectstate;
        u8 curmultista_connectstate;
        char backoff_val;
@@ -1652,8 +1725,20 @@ struct dig_t {
        char backoffval_range_min;
 };
 
+struct rtl_global_var {
+       /* from this list we can get
+        * other adapter's rtl_priv */
+       struct list_head glb_priv_list;
+       spinlock_t glb_list_lock;
+};
+
 struct rtl_priv {
        struct completion firmware_loading_complete;
+       struct list_head list;
+       struct rtl_priv *buddy_priv;
+       struct rtl_global_var *glb_var;
+       struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
+       struct rtl_dmsp_ctl dmsp_ctl;
        struct rtl_locks locks;
        struct rtl_works works;
        struct rtl_mac mac80211;
@@ -1674,6 +1759,9 @@ struct rtl_priv {
 
        struct rtl_rate_priv *rate_priv;
 
+       /* sta entry list for ap adhoc or mesh */
+       struct list_head entry_list;
+
        struct rtl_debug dbg;
        int max_fw_size;
 
@@ -1815,9 +1903,9 @@ struct bt_coexist_info {
        EF1BYTE(*((u8 *)(_ptr)))
 /* Read le16 data from memory and convert to host ordering */
 #define READEF2BYTE(_ptr)      \
-       EF2BYTE(*((u16 *)(_ptr)))
+       EF2BYTE(*(_ptr))
 #define READEF4BYTE(_ptr)      \
-       EF4BYTE(*((u32 *)(_ptr)))
+       EF4BYTE(*(_ptr))
 
 /* Write data to memory */
 #define WRITEEF1BYTE(_ptr, _val)       \
@@ -1826,7 +1914,7 @@ struct bt_coexist_info {
 #define WRITEEF2BYTE(_ptr, _val)       \
        (*((u16 *)(_ptr))) = EF2BYTE(_val)
 #define WRITEEF4BYTE(_ptr, _val)       \
-       (*((u16 *)(_ptr))) = EF2BYTE(_val)
+       (*((u32 *)(_ptr))) = EF2BYTE(_val)
 
 /* Create a bit mask
  * Examples:
@@ -1859,9 +1947,9 @@ struct bt_coexist_info {
  * 4-byte pointer in little-endian system.
  */
 #define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
-       (EF4BYTE(*((u32 *)(__pstart))))
+       (EF4BYTE(*((__le32 *)(__pstart))))
 #define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
-       (EF2BYTE(*((u16 *)(__pstart))))
+       (EF2BYTE(*((__le16 *)(__pstart))))
 #define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
        (EF1BYTE(*((u8 *)(__pstart))))
 
@@ -1908,13 +1996,13 @@ value to host byte ordering.*/
  * Set subfield of little-endian 4-byte value to specified value.
  */
 #define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u32 *)(__pstart)) = EF4BYTE \
+       *((u32 *)(__pstart)) = \
        ( \
                LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
                ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
        );
 #define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u16 *)(__pstart)) = EF2BYTE \
+       *((u16 *)(__pstart)) = \
        ( \
                LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
                ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
@@ -2100,4 +2188,11 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
        return ieee80211_find_sta(vif, bssid);
 }
 
+static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+               u8 *mac_addr)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       return ieee80211_find_sta(mac->vif, mac_addr);
+}
+
 #endif
index 3118c425bcf17dcbdf85f274a568e25a80cac9e2..441cbccbd38162bb8b1e83c11fe5fe266897a9c8 100644 (file)
@@ -354,7 +354,9 @@ out:
        return ret;
 }
 
-static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct wl1251 *wl = hw->priv;
        unsigned long flags;
index f429fc110cb0ac1a09e887564310f04dfe0c72dd..dadf1dbb002a3ac7bfb904f47e694eb5b5b37c19 100644 (file)
@@ -32,7 +32,6 @@
 #include "../wlcore/acx.h"
 #include "../wlcore/tx.h"
 #include "../wlcore/rx.h"
-#include "../wlcore/io.h"
 #include "../wlcore/boot.h"
 
 #include "wl12xx.h"
@@ -1185,9 +1184,16 @@ static int wl12xx_enable_interrupts(struct wl1271 *wl)
        ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
                               WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
        if (ret < 0)
-               goto out;
+               goto disable_interrupts;
 
        ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
+       if (ret < 0)
+               goto disable_interrupts;
+
+       return ret;
+
+disable_interrupts:
+       wlcore_disable_interrupts(wl);
 
 out:
        return ret;
@@ -1583,7 +1589,10 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        return wlcore_set_key(wl, cmd, vif, sta, key_conf);
 }
 
+static int wl12xx_setup(struct wl1271 *wl);
+
 static struct wlcore_ops wl12xx_ops = {
+       .setup                  = wl12xx_setup,
        .identify_chip          = wl12xx_identify_chip,
        .identify_fw            = wl12xx_identify_fw,
        .boot                   = wl12xx_boot,
@@ -1624,26 +1633,15 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
                },
 };
 
-static int __devinit wl12xx_probe(struct platform_device *pdev)
+static int wl12xx_setup(struct wl1271 *wl)
 {
-       struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
-       struct wl1271 *wl;
-       struct ieee80211_hw *hw;
-       struct wl12xx_priv *priv;
-
-       hw = wlcore_alloc_hw(sizeof(*priv));
-       if (IS_ERR(hw)) {
-               wl1271_error("can't allocate hw");
-               return PTR_ERR(hw);
-       }
+       struct wl12xx_priv *priv = wl->priv;
+       struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
 
-       wl = hw->priv;
-       priv = wl->priv;
-       wl->ops = &wl12xx_ops;
-       wl->ptable = wl12xx_ptable;
        wl->rtable = wl12xx_rtable;
-       wl->num_tx_desc = 16;
-       wl->num_rx_desc = 8;
+       wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
+       wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
+       wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
        wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1695,7 +1693,36 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
                        wl1271_error("Invalid tcxo parameter %s", tcxo_param);
        }
 
-       return wlcore_probe(wl, pdev);
+       return 0;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+       struct wl1271 *wl;
+       struct ieee80211_hw *hw;
+       int ret;
+
+       hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
+                            WL12XX_AGGR_BUFFER_SIZE);
+       if (IS_ERR(hw)) {
+               wl1271_error("can't allocate hw");
+               ret = PTR_ERR(hw);
+               goto out;
+       }
+
+       wl = hw->priv;
+       wl->ops = &wl12xx_ops;
+       wl->ptable = wl12xx_ptable;
+       ret = wlcore_probe(wl, pdev);
+       if (ret)
+               goto out_free;
+
+       return ret;
+
+out_free:
+       wlcore_free_hw(wl);
+out:
+       return ret;
 }
 
 static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
@@ -1714,17 +1741,7 @@ static struct platform_driver wl12xx_driver = {
        }
 };
 
-static int __init wl12xx_init(void)
-{
-       return platform_driver_register(&wl12xx_driver);
-}
-module_init(wl12xx_init);
-
-static void __exit wl12xx_exit(void)
-{
-       platform_driver_unregister(&wl12xx_driver);
-}
-module_exit(wl12xx_exit);
+module_platform_driver(wl12xx_driver);
 
 module_param_named(fref, fref_param, charp, 0);
 MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
index 26990fb4edeade102151fe8e818c215bc51b25d0..7182bbf6625daac99ddcf00b3581c32ee39c2ef6 100644 (file)
 #define WL128X_SUBTYPE_VER     2
 #define WL128X_MINOR_VER       115
 
+#define WL12XX_AGGR_BUFFER_SIZE        (4 * PAGE_SIZE)
+
+#define WL12XX_NUM_TX_DESCRIPTORS 16
+#define WL12XX_NUM_RX_DESCRIPTORS 8
+
+#define WL12XX_NUM_MAC_ADDRESSES 2
+
 struct wl127x_rx_mem_pool_addr {
        u32 addr;
        u32 addr_extra;
index 3ce6f1039af3f33593663d8d5c8b0639f803f346..7f1669cdea090ed6895b8eaabf0ab7f74054d9c4 100644 (file)
@@ -220,7 +220,7 @@ static ssize_t clear_fw_stats_write(struct file *file,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl18xx_acx_clear_statistics(wl);
index 69042bb9a0975637be043e2d89d7fdc16b3a9b34..a39682a7c25f333cefb9bcfc67b632426afece00 100644 (file)
@@ -30,7 +30,6 @@
 #include "../wlcore/acx.h"
 #include "../wlcore/tx.h"
 #include "../wlcore/rx.h"
-#include "../wlcore/io.h"
 #include "../wlcore/boot.h"
 
 #include "reg.h"
@@ -46,7 +45,6 @@
 static char *ht_mode_param = NULL;
 static char *board_type_param = NULL;
 static bool checksum_param = false;
-static bool enable_11a_param = true;
 static int num_rx_desc_param = -1;
 
 /* phy paramters */
@@ -416,7 +414,7 @@ static struct wlcore_conf wl18xx_conf = {
                .snr_threshold                  = 0,
        },
        .ht = {
-               .rx_ba_win_size = 10,
+               .rx_ba_win_size = 32,
                .tx_ba_win_size = 64,
                .inactivity_timeout = 10000,
                .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
@@ -506,8 +504,8 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
                .rdl                            = 0x01,
                .auto_detect                    = 0x00,
                .dedicated_fem                  = FEM_NONE,
-               .low_band_component             = COMPONENT_2_WAY_SWITCH,
-               .low_band_component_type        = 0x06,
+               .low_band_component             = COMPONENT_3_WAY_SWITCH,
+               .low_band_component_type        = 0x04,
                .high_band_component            = COMPONENT_2_WAY_SWITCH,
                .high_band_component_type       = 0x09,
                .tcxo_ldo_voltage               = 0x00,
@@ -813,6 +811,13 @@ static int wl18xx_enable_interrupts(struct wl1271 *wl)
 
        ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
                               WL1271_ACX_INTR_ALL & ~intr_mask);
+       if (ret < 0)
+               goto disable_interrupts;
+
+       return ret;
+
+disable_interrupts:
+       wlcore_disable_interrupts(wl);
 
 out:
        return ret;
@@ -1203,6 +1208,12 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
        struct wl18xx_static_data_priv *static_data_priv =
                (struct wl18xx_static_data_priv *) static_data->priv;
 
+       strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
+               sizeof(wl->chip.phy_fw_ver_str));
+
+       /* make sure the string is NULL-terminated */
+       wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0';
+
        wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
 
        return 0;
@@ -1241,13 +1252,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        if (!change_spare)
                return wlcore_set_key(wl, cmd, vif, sta, key_conf);
 
-       /*
-        * stop the queues and flush to ensure the next packets are
-        * in sync with FW spare block accounting
-        */
-       wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
-       wl1271_tx_flush(wl);
-
        ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
        if (ret < 0)
                goto out;
@@ -1270,7 +1274,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        }
 
 out:
-       wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
        return ret;
 }
 
@@ -1293,7 +1296,10 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
        return buf_offset;
 }
 
+static int wl18xx_setup(struct wl1271 *wl);
+
 static struct wlcore_ops wl18xx_ops = {
+       .setup          = wl18xx_setup,
        .identify_chip  = wl18xx_identify_chip,
        .boot           = wl18xx_boot,
        .plt_init       = wl18xx_plt_init,
@@ -1374,27 +1380,15 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
                },
 };
 
-static int __devinit wl18xx_probe(struct platform_device *pdev)
+static int wl18xx_setup(struct wl1271 *wl)
 {
-       struct wl1271 *wl;
-       struct ieee80211_hw *hw;
-       struct wl18xx_priv *priv;
+       struct wl18xx_priv *priv = wl->priv;
        int ret;
 
-       hw = wlcore_alloc_hw(sizeof(*priv));
-       if (IS_ERR(hw)) {
-               wl1271_error("can't allocate hw");
-               ret = PTR_ERR(hw);
-               goto out;
-       }
-
-       wl = hw->priv;
-       priv = wl->priv;
-       wl->ops = &wl18xx_ops;
-       wl->ptable = wl18xx_ptable;
        wl->rtable = wl18xx_rtable;
-       wl->num_tx_desc = 32;
-       wl->num_rx_desc = 32;
+       wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+       wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+       wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
        wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1405,9 +1399,9 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
        if (num_rx_desc_param != -1)
                wl->num_rx_desc = num_rx_desc_param;
 
-       ret = wl18xx_conf_init(wl, &pdev->dev);
+       ret = wl18xx_conf_init(wl, wl->dev);
        if (ret < 0)
-               goto out_free;
+               return ret;
 
        /* If the module param is set, update it in conf */
        if (board_type_param) {
@@ -1424,27 +1418,14 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
                } else {
                        wl1271_error("invalid board type '%s'",
                                board_type_param);
-                       ret = -EINVAL;
-                       goto out_free;
+                       return -EINVAL;
                }
        }
 
-       /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */
-       switch (priv->conf.phy.board_type) {
-       case BOARD_TYPE_HDK_18XX:
-       case BOARD_TYPE_COM8_18XX:
-               priv->conf.phy.low_band_component_type = 0x06;
-               break;
-       case BOARD_TYPE_FPGA_18XX:
-       case BOARD_TYPE_DVP_18XX:
-       case BOARD_TYPE_EVB_18XX:
-               priv->conf.phy.low_band_component_type = 0x05;
-               break;
-       default:
+       if (priv->conf.phy.board_type >= NUM_BOARD_TYPES) {
                wl1271_error("invalid board type '%d'",
                        priv->conf.phy.board_type);
-               ret = -EINVAL;
-               goto out_free;
+               return -EINVAL;
        }
 
        if (low_band_component_param != -1)
@@ -1476,22 +1457,21 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
                        priv->conf.ht.mode = HT_MODE_SISO20;
                else {
                        wl1271_error("invalid ht_mode '%s'", ht_mode_param);
-                       ret = -EINVAL;
-                       goto out_free;
+                       return -EINVAL;
                }
        }
 
        if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
                /*
                 * Only support mimo with multiple antennas. Fall back to
-                * siso20.
+                * siso40.
                 */
                if (wl18xx_is_mimo_supported(wl))
                        wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
                                          &wl18xx_mimo_ht_cap_2ghz);
                else
                        wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
-                                         &wl18xx_siso20_ht_cap);
+                                         &wl18xx_siso40_ht_cap_2ghz);
 
                /* 5Ghz is always wide */
                wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
@@ -1513,9 +1493,34 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
                wl18xx_ops.init_vif = NULL;
        }
 
-       wl->enable_11a = enable_11a_param;
+       /* Enable 11a Band only if we have 5G antennas */
+       wl->enable_11a = (priv->conf.phy.number_of_assembled_ant5 != 0);
+
+       return 0;
+}
+
+static int __devinit wl18xx_probe(struct platform_device *pdev)
+{
+       struct wl1271 *wl;
+       struct ieee80211_hw *hw;
+       int ret;
+
+       hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
+                            WL18XX_AGGR_BUFFER_SIZE);
+       if (IS_ERR(hw)) {
+               wl1271_error("can't allocate hw");
+               ret = PTR_ERR(hw);
+               goto out;
+       }
+
+       wl = hw->priv;
+       wl->ops = &wl18xx_ops;
+       wl->ptable = wl18xx_ptable;
+       ret = wlcore_probe(wl, pdev);
+       if (ret)
+               goto out_free;
 
-       return wlcore_probe(wl, pdev);
+       return ret;
 
 out_free:
        wlcore_free_hw(wl);
@@ -1539,18 +1544,7 @@ static struct platform_driver wl18xx_driver = {
        }
 };
 
-static int __init wl18xx_init(void)
-{
-       return platform_driver_register(&wl18xx_driver);
-}
-module_init(wl18xx_init);
-
-static void __exit wl18xx_exit(void)
-{
-       platform_driver_unregister(&wl18xx_driver);
-}
-module_exit(wl18xx_exit);
-
+module_platform_driver(wl18xx_driver);
 module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
 MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
 
@@ -1561,9 +1555,6 @@ MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
 module_param_named(checksum, checksum_param, bool, S_IRUSR);
 MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
 
-module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
-MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
-
 module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
 MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
 
index 6452396fa1d411d4e37b9c82e37046589dc26044..96a1e438d677fd1f14ea125338c824c25e34b228 100644 (file)
 
 #define WL18XX_CMD_MAX_SIZE          740
 
+#define WL18XX_AGGR_BUFFER_SIZE                (13 * PAGE_SIZE)
+
+#define WL18XX_NUM_TX_DESCRIPTORS 32
+#define WL18XX_NUM_RX_DESCRIPTORS 32
+
+#define WL18XX_NUM_MAC_ADDRESSES 3
+
 struct wl18xx_priv {
        /* buffer for sending commands to FW */
        u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
index 20e1bd9238321e6d2eab682784f8e3cd9b6f8528..eaef3f41b2524b9885084ce33c77afb2a034ceb6 100644 (file)
@@ -59,6 +59,9 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
        u16 status;
        u16 poll_count = 0;
 
+       if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
+               return -EIO;
+
        cmd = buf;
        cmd->id = cpu_to_le16(id);
        cmd->status = 0;
@@ -990,7 +993,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
 
        ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
                                      skb->data, skb->len,
-                                     CMD_TEMPL_KLV_IDX_NULL_DATA,
+                                     wlvif->sta.klv_template_id,
                                      wlvif->basic_rate);
 
 out:
@@ -1785,10 +1788,17 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                      wlvif->bss_type == BSS_TYPE_IBSS)))
                return -EINVAL;
 
-       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       ret = wl12xx_cmd_role_enable(wl,
+                                    wl12xx_wlvif_to_vif(wlvif)->addr,
+                                    WL1271_ROLE_DEVICE,
+                                    &wlvif->dev_role_id);
        if (ret < 0)
                goto out;
 
+       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       if (ret < 0)
+               goto out_disable;
+
        ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
        if (ret < 0)
                goto out_stop;
@@ -1797,6 +1807,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 
 out_stop:
        wl12xx_cmd_role_stop_dev(wl, wlvif);
+out_disable:
+       wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 out:
        return ret;
 }
@@ -1824,6 +1836,11 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
        if (ret < 0)
                goto out;
+
+       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+       if (ret < 0)
+               goto out;
+
 out:
        return ret;
 }
index 4ef0b095f0d61cbb2d85d7f9986cf8f25f58fa81..2409f3d71f63ddd457cc571b7a76798234d16e0b 100644 (file)
@@ -157,11 +157,6 @@ enum wl1271_commands {
 
 #define MAX_CMD_PARAMS 572
 
-enum {
-       CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
-       CMD_TEMPL_KLV_IDX_MAX = 4
-};
-
 enum cmd_templ {
        CMD_TEMPL_NULL_DATA = 0,
        CMD_TEMPL_BEACON,
index d77224f2ac6bcbccfa6377f85c49a620a8fd2234..9e40760bafe17b43121585d63bacfaa1925611b3 100644 (file)
@@ -412,8 +412,7 @@ struct conf_rx_settings {
 #define CONF_TX_RATE_RETRY_LIMIT       10
 
 /* basic rates for p2p operations (probe req/resp, etc.) */
-#define CONF_TX_RATE_MASK_BASIC_P2P    (CONF_HW_BIT_RATE_6MBPS | \
-       CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS)
+#define CONF_TX_RATE_MASK_BASIC_P2P    CONF_HW_BIT_RATE_6MBPS
 
 /*
  * Rates supported for data packets when operating as AP. Note the absence
index 6b800b3cbea59fdeb7be9fbe395aea2988fbd682..db4bf5a68ce208c1fa3e98fbe848221051ce0ec5 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/bitops.h>
 #include <linux/printk.h>
 
-#define DRIVER_NAME "wl12xx"
+#define DRIVER_NAME "wlcore"
 #define DRIVER_PREFIX DRIVER_NAME ": "
 
 enum {
@@ -73,11 +73,21 @@ extern u32 wl12xx_debug_level;
 #define wl1271_info(fmt, arg...) \
        pr_info(DRIVER_PREFIX fmt "\n", ##arg)
 
+/* define the debug macro differently if dynamic debug is supported */
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define wl1271_debug(level, fmt, arg...) \
        do { \
-               if (level & wl12xx_debug_level) \
-                       pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+               if (unlikely(level & wl12xx_debug_level)) \
+                       dynamic_pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+       } while (0)
+#else
+#define wl1271_debug(level, fmt, arg...) \
+       do { \
+               if (unlikely(level & wl12xx_debug_level)) \
+                       printk(KERN_DEBUG pr_fmt(DRIVER_PREFIX fmt "\n"), \
+                              ##arg); \
        } while (0)
+#endif
 
 /* TODO: use pr_debug_hex_dump when it becomes available */
 #define wl1271_dump(level, prefix, buf, len)   \
index 80dbc5304facdc5e69f93b55b6fbe442f4a25052..c86bb00c24884d355e93af45917b30defe86f220 100644 (file)
@@ -62,11 +62,14 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
 
        mutex_lock(&wl->mutex);
 
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       if (wl->state == WL1271_STATE_ON && !wl->plt &&
+       if (!wl->plt &&
            time_after(jiffies, wl->stats.fw_stats_update +
                       msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
                wl1271_acx_statistics(wl, wl->stats.fw_stats);
@@ -286,7 +289,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
 
        wl->conf.conn.dynamic_ps_timeout = value;
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -353,7 +356,7 @@ static ssize_t forced_ps_write(struct file *file,
 
        wl->conf.conn.forced_ps = value;
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -486,6 +489,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
        DRIVER_STATE_PRINT_HEX(platform_quirks);
        DRIVER_STATE_PRINT_HEX(chip.id);
        DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
+       DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
        DRIVER_STATE_PRINT_INT(sched_scanning);
 
 #undef DRIVER_STATE_PRINT_INT
@@ -999,7 +1003,7 @@ static ssize_t sleep_auth_write(struct file *file,
 
        wl->conf.conn.sta_sleep_auth = value;
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                /* this will show up on "read" in case we are off */
                wl->sleep_auth = value;
                goto out;
@@ -1060,14 +1064,16 @@ static ssize_t dev_mem_read(struct file *file,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state == WLCORE_STATE_OFF)) {
                ret = -EFAULT;
                goto skip_read;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto skip_read;
+       /*
+        * Don't fail if elp_wakeup returns an error, so the device's memory
+        * could be read even if the FW crashed
+        */
+       wl1271_ps_elp_wakeup(wl);
 
        /* store current partition and switch partition */
        memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1145,14 +1151,16 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state == WLCORE_STATE_OFF)) {
                ret = -EFAULT;
                goto skip_write;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto skip_write;
+       /*
+        * Don't fail if elp_wakeup returns an error, so the device's memory
+        * could be read even if the FW crashed
+        */
+       wl1271_ps_elp_wakeup(wl);
 
        /* store current partition and switch partition */
        memcpy(&old_part, &wl->curr_part, sizeof(old_part));
index a3c867786df80fcc9458461b804dd8fcdeb0610c..32d157f62f3116f32dfc2b562aad770c0e9da907 100644 (file)
@@ -141,7 +141,7 @@ int wl1271_init_templates_config(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+       for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) {
                ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
                                              CMD_TEMPL_KLV, NULL,
                                              sizeof(struct ieee80211_qos_hdr),
@@ -371,15 +371,7 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
                                       struct ieee80211_vif *vif)
 {
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-       int ret, i;
-
-       /* disable all keep-alive templates */
-       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-               ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
-                                                  ACX_KEEP_ALIVE_TPL_INVALID);
-               if (ret < 0)
-                       return ret;
-       }
+       int ret;
 
        /* disable the keep-alive feature */
        ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
index 259149f36faec0b7108500302f30e66e323cc1aa..f48530fec14fb3ba9563e1763c2c38fa46b9488d 100644 (file)
@@ -64,7 +64,7 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
                return -EIO;
 
        ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
-       if (ret && wl->state != WL1271_STATE_OFF)
+       if (ret && wl->state != WLCORE_STATE_OFF)
                set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
 
        return ret;
@@ -80,7 +80,7 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
                return -EIO;
 
        ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
-       if (ret && wl->state != WL1271_STATE_OFF)
+       if (ret && wl->state != WLCORE_STATE_OFF)
                set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
 
        return ret;
index 72548609f71122b469991615c3dda3e2aabe598c..25530c8760cb0a07234f753efe66e31fbf107c2d 100644 (file)
@@ -248,7 +248,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* Tx went out in the meantime - everything is ok */
@@ -512,7 +512,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_IRQ, "IRQ work");
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -696,7 +696,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
                 * we can't call wl12xx_get_vif_count() here because
                 * wl->mutex is taken, so use the cached last_vif_count value
                 */
-               if (wl->last_vif_count > 1) {
+               if (wl->last_vif_count > 1 && wl->mr_fw_name) {
                        fw_type = WL12XX_FW_TYPE_MULTI;
                        fw_name = wl->mr_fw_name;
                } else {
@@ -744,38 +744,14 @@ out:
        return ret;
 }
 
-static void wl1271_fetch_nvs(struct wl1271 *wl)
-{
-       const struct firmware *fw;
-       int ret;
-
-       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
-
-       if (ret < 0) {
-               wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
-                            WL12XX_NVS_NAME, ret);
-               return;
-       }
-
-       wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
-       if (!wl->nvs) {
-               wl1271_error("could not allocate memory for the nvs file");
-               goto out;
-       }
-
-       wl->nvs_len = fw->size;
-
-out:
-       release_firmware(fw);
-}
-
 void wl12xx_queue_recovery_work(struct wl1271 *wl)
 {
        WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 
        /* Avoid a recursive recovery */
-       if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+       if (wl->state == WLCORE_STATE_ON) {
+               wl->state = WLCORE_STATE_RESTARTING;
+               set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
                wlcore_disable_interrupts_nosync(wl);
                ieee80211_queue_work(wl->hw, &wl->recovery_work);
        }
@@ -913,7 +889,7 @@ static void wl1271_recovery_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state != WL1271_STATE_ON || wl->plt)
+       if (wl->state == WLCORE_STATE_OFF || wl->plt)
                goto out_unlock;
 
        if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1081,7 +1057,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
 
        wl1271_notice("power up");
 
-       if (wl->state != WL1271_STATE_OFF) {
+       if (wl->state != WLCORE_STATE_OFF) {
                wl1271_error("cannot go into PLT state because not "
                             "in off state: %d", wl->state);
                ret = -EBUSY;
@@ -1102,7 +1078,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
                if (ret < 0)
                        goto power_off;
 
-               wl->state = WL1271_STATE_ON;
+               wl->state = WLCORE_STATE_ON;
                wl1271_notice("firmware booted in PLT mode %s (%s)",
                              PLT_MODE[plt_mode],
                              wl->chip.fw_ver_str);
@@ -1171,7 +1147,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
        wl1271_power_off(wl);
        wl->flags = 0;
        wl->sleep_auth = WL1271_PSM_ILLEGAL;
-       wl->state = WL1271_STATE_OFF;
+       wl->state = WLCORE_STATE_OFF;
        wl->plt = false;
        wl->plt_mode = PLT_OFF;
        wl->rx_counter = 0;
@@ -1181,7 +1157,9 @@ out:
        return ret;
 }
 
-static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1175,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        mapping = skb_get_queue_mapping(skb);
        q = wl1271_tx_get_queue(mapping);
 
-       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
 
        spin_lock_irqsave(&wl->wl_lock, flags);
 
@@ -1600,12 +1578,6 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
        if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out;
 
-       if ((wl->conf.conn.suspend_wake_up_event ==
-            wl->conf.conn.wake_up_event) &&
-           (wl->conf.conn.suspend_listen_interval ==
-            wl->conf.conn.listen_interval))
-               goto out;
-
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
@@ -1614,6 +1586,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
        if (ret < 0)
                goto out_sleep;
 
+       if ((wl->conf.conn.suspend_wake_up_event ==
+            wl->conf.conn.wake_up_event) &&
+           (wl->conf.conn.suspend_listen_interval ==
+            wl->conf.conn.listen_interval))
+               goto out_sleep;
+
        ret = wl1271_acx_wake_up_conditions(wl, wlvif,
                                    wl->conf.conn.suspend_wake_up_event,
                                    wl->conf.conn.suspend_listen_interval);
@@ -1669,11 +1647,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
        if ((!is_ap) && (!is_sta))
                return;
 
-       if (is_sta &&
-           ((wl->conf.conn.suspend_wake_up_event ==
-             wl->conf.conn.wake_up_event) &&
-            (wl->conf.conn.suspend_listen_interval ==
-             wl->conf.conn.listen_interval)))
+       if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                return;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -1683,6 +1657,12 @@ static void wl1271_configure_resume(struct wl1271 *wl,
        if (is_sta) {
                wl1271_configure_wowlan(wl, NULL);
 
+               if ((wl->conf.conn.suspend_wake_up_event ==
+                    wl->conf.conn.wake_up_event) &&
+                   (wl->conf.conn.suspend_listen_interval ==
+                    wl->conf.conn.listen_interval))
+                       goto out_sleep;
+
                ret = wl1271_acx_wake_up_conditions(wl, wlvif,
                                    wl->conf.conn.wake_up_event,
                                    wl->conf.conn.listen_interval);
@@ -1695,6 +1675,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
                ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        }
 
+out_sleep:
        wl1271_ps_elp_sleep(wl);
 }
 
@@ -1831,7 +1812,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
 {
        int i;
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (wl->state == WLCORE_STATE_OFF) {
                if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
                                        &wl->flags))
                        wlcore_enable_interrupts(wl);
@@ -1843,7 +1824,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
         * this must be before the cancel_work calls below, so that the work
         * functions don't perform further work.
         */
-       wl->state = WL1271_STATE_OFF;
+       wl->state = WLCORE_STATE_OFF;
 
        /*
         * Use the nosync variant to disable interrupts, so the mutex could be
@@ -1854,6 +1835,8 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        mutex_unlock(&wl->mutex);
 
        wlcore_synchronize_interrupts(wl);
+       if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+               cancel_work_sync(&wl->recovery_work);
        wl1271_flush_deferred_work(wl);
        cancel_delayed_work_sync(&wl->scan_complete_work);
        cancel_work_sync(&wl->netstack_work);
@@ -1956,6 +1939,27 @@ static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
        *idx = WL12XX_MAX_RATE_POLICIES;
 }
 
+static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
+{
+       u8 policy = find_first_zero_bit(wl->klv_templates_map,
+                                       WLCORE_MAX_KLV_TEMPLATES);
+       if (policy >= WLCORE_MAX_KLV_TEMPLATES)
+               return -EBUSY;
+
+       __set_bit(policy, wl->klv_templates_map);
+       *idx = policy;
+       return 0;
+}
+
+static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
+{
+       if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
+               return;
+
+       __clear_bit(*idx, wl->klv_templates_map);
+       *idx = WLCORE_MAX_KLV_TEMPLATES;
+}
+
 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        switch (wlvif->bss_type) {
@@ -2020,6 +2024,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
                wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
                wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+               wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
                wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
                wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
                wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -2096,7 +2101,7 @@ irq_disable:
                /* Unlocking the mutex in the middle of handling is
                   inherently unsafe. In this case we deem it safe to do,
                   because we need to let any possibly pending IRQ out of
-                  the system (and while we are WL1271_STATE_OFF the IRQ
+                  the system (and while we are WLCORE_STATE_OFF the IRQ
                   work function will not do anything.) Also, any other
                   possible concurrent operations will fail due to the
                   current state, hence the wl1271 struct should be safe. */
@@ -2131,7 +2136,7 @@ power_off:
        wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
                     wl->enable_11a ? "" : "not ");
 
-       wl->state = WL1271_STATE_ON;
+       wl->state = WLCORE_STATE_ON;
 out:
        return booted;
 }
@@ -2165,7 +2170,11 @@ static bool wl12xx_need_fw_change(struct wl1271 *wl,
        wl->last_vif_count = vif_count;
 
        /* no need for fw change if the device is OFF */
-       if (wl->state == WL1271_STATE_OFF)
+       if (wl->state == WLCORE_STATE_OFF)
+               return false;
+
+       /* no need for fw change if a single fw is used */
+       if (!wl->mr_fw_name)
                return false;
 
        if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
@@ -2247,7 +2256,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
         * TODO: after the nvs issue will be solved, move this block
         * to start(), and make sure here the driver is ON.
         */
-       if (wl->state == WL1271_STATE_OFF) {
+       if (wl->state == WLCORE_STATE_OFF) {
                /*
                 * we still need this in order to configure the fw
                 * while uploading the nvs
@@ -2261,21 +2270,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
-           wlvif->bss_type == BSS_TYPE_IBSS) {
-               /*
-                * The device role is a special role used for
-                * rx and tx frames prior to association (as
-                * the STA role can get packets only from
-                * its associated bssid)
-                */
-               ret = wl12xx_cmd_role_enable(wl, vif->addr,
-                                                WL1271_ROLE_DEVICE,
-                                                &wlvif->dev_role_id);
-               if (ret < 0)
-                       goto out;
-       }
-
        ret = wl12xx_cmd_role_enable(wl, vif->addr,
                                     role_type, &wlvif->role_id);
        if (ret < 0)
@@ -2314,7 +2308,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                return;
 
        /* because of hardware recovery, we may get here twice */
-       if (wl->state != WL1271_STATE_ON)
+       if (wl->state == WLCORE_STATE_OFF)
                return;
 
        wl1271_info("down");
@@ -2344,10 +2338,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                    wlvif->bss_type == BSS_TYPE_IBSS) {
                        if (wl12xx_dev_role_started(wlvif))
                                wl12xx_stop_dev(wl, wlvif);
-
-                       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
-                       if (ret < 0)
-                               goto deinit;
                }
 
                ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
@@ -2366,6 +2356,7 @@ deinit:
                wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
                wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
                wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+               wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
        } else {
                wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
                wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
@@ -2430,12 +2421,11 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl12xx_vif *iter;
        struct vif_counter_data vif_count;
-       bool cancel_recovery = true;
 
        wl12xx_get_vif_count(hw, vif, &vif_count);
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF ||
+       if (wl->state == WLCORE_STATE_OFF ||
            !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
                goto out;
 
@@ -2455,12 +2445,9 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
                wl12xx_force_active_psm(wl);
                set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
                wl12xx_queue_recovery_work(wl);
-               cancel_recovery = false;
        }
 out:
        mutex_unlock(&wl->mutex);
-       if (cancel_recovery)
-               cancel_work_sync(&wl->recovery_work);
 }
 
 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
@@ -2534,7 +2521,7 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                goto out;
 
        ret = wl1271_acx_keep_alive_config(wl, wlvif,
-                                          CMD_TEMPL_KLV_IDX_NULL_DATA,
+                                          wlvif->sta.klv_template_id,
                                           ACX_KEEP_ALIVE_TPL_VALID);
        if (ret < 0)
                goto out;
@@ -2554,6 +2541,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                ieee80211_chswitch_done(vif, false);
        }
 
+       /* invalidate keep-alive template */
+       wl1271_acx_keep_alive_config(wl, wlvif,
+                                    wlvif->sta.klv_template_id,
+                                    ACX_KEEP_ALIVE_TPL_INVALID);
+
        /* to stop listening to a channel, we disconnect */
        ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
        if (ret < 0)
@@ -2592,11 +2584,6 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                wlvif->rate_set =
                        wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
                ret = wl1271_acx_sta_rate_policies(wl, wlvif);
-               if (ret < 0)
-                       goto out;
-               ret = wl1271_acx_keep_alive_config(
-                       wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
-                       ACX_KEEP_ALIVE_TPL_INVALID);
                if (ret < 0)
                        goto out;
                clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
@@ -2770,7 +2757,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
        if (changed & IEEE80211_CONF_CHANGE_POWER)
                wl->power_level = conf->power_level;
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -2804,10 +2791,6 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
 {
        struct wl1271_filter_params *fp;
        struct netdev_hw_addr *ha;
-       struct wl1271 *wl = hw->priv;
-
-       if (unlikely(wl->state == WL1271_STATE_OFF))
-               return 0;
 
        fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
        if (!fp) {
@@ -2856,7 +2839,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        *total &= WL1271_SUPPORTED_FILTERS;
        changed &= WL1271_SUPPORTED_FILTERS;
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -3080,8 +3063,45 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_key_conf *key_conf)
 {
        struct wl1271 *wl = hw->priv;
+       int ret;
+       bool might_change_spare =
+               key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+               key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
 
-       return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+       if (might_change_spare) {
+               /*
+                * stop the queues and flush to ensure the next packets are
+                * in sync with FW spare block accounting
+                */
+               mutex_lock(&wl->mutex);
+               wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+               mutex_unlock(&wl->mutex);
+
+               wl1271_tx_flush(wl);
+       }
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
+               ret = -EAGAIN;
+               goto out_wake_queues;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_wake_queues;
+
+       ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+
+       wl1271_ps_elp_sleep(wl);
+
+out_wake_queues:
+       if (might_change_spare)
+               wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+
+       mutex_unlock(&wl->mutex);
+
+       return ret;
 }
 
 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
@@ -3103,17 +3123,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                     key_conf->keylen, key_conf->flags);
        wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
 
-       mutex_lock(&wl->mutex);
-
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               ret = -EAGAIN;
-               goto out_unlock;
-       }
-
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out_unlock;
-
        switch (key_conf->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
@@ -3143,8 +3152,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        default:
                wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
 
-               ret = -EOPNOTSUPP;
-               goto out_sleep;
+               return -EOPNOTSUPP;
        }
 
        switch (cmd) {
@@ -3155,7 +3163,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                                 tx_seq_32, tx_seq_16, sta);
                if (ret < 0) {
                        wl1271_error("Could not add or replace key");
-                       goto out_sleep;
+                       return ret;
                }
 
                /*
@@ -3169,7 +3177,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                        ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
                        if (ret < 0) {
                                wl1271_warning("build arp rsp failed: %d", ret);
-                               goto out_sleep;
+                               return ret;
                        }
                }
                break;
@@ -3181,22 +3189,15 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                                     0, 0, sta);
                if (ret < 0) {
                        wl1271_error("Could not remove key");
-                       goto out_sleep;
+                       return ret;
                }
                break;
 
        default:
                wl1271_error("Unsupported key cmd 0x%x", cmd);
-               ret = -EOPNOTSUPP;
-               break;
+               return -EOPNOTSUPP;
        }
 
-out_sleep:
-       wl1271_ps_elp_sleep(wl);
-
-out_unlock:
-       mutex_unlock(&wl->mutex);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(wlcore_set_key);
@@ -3219,7 +3220,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                /*
                 * We cannot return -EBUSY here because cfg80211 will expect
                 * a call to ieee80211_scan_completed if we do - in this case
@@ -3259,7 +3260,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -3308,7 +3309,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -3345,7 +3346,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -3366,7 +3367,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -3395,7 +3396,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -4171,7 +4172,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
@@ -4255,7 +4256,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -4454,7 +4455,7 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EBUSY;
                goto out;
        }
@@ -4493,7 +4494,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -4611,7 +4612,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
                                                    mask->control[i].legacy,
                                                    i);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
@@ -4647,12 +4648,14 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state == WLCORE_STATE_OFF)) {
                wl12xx_for_each_wlvif_sta(wl, wlvif) {
                        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
                        ieee80211_chswitch_done(vif, false);
                }
                goto out;
+       } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
+               goto out;
        }
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -4687,7 +4690,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* packets are considered pending if in the TX queue or the FW */
@@ -4936,7 +4939,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
 
        wl->sg_enabled = res;
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -5054,7 +5057,7 @@ static void wl1271_connection_loss_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* Call mac80211 connection loss */
@@ -5068,18 +5071,17 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
-                                       u32 oui, u32 nic, int n)
+static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
 {
        int i;
 
-       wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
-                    oui, nic, n);
+       wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
+                    oui, nic);
 
-       if (nic + n - 1 > 0xffffff)
+       if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
                wl1271_warning("NIC part of the MAC address wraps around!");
 
-       for (i = 0; i < n; i++) {
+       for (i = 0; i < wl->num_mac_addr; i++) {
                wl->addresses[i].addr[0] = (u8)(oui >> 16);
                wl->addresses[i].addr[1] = (u8)(oui >> 8);
                wl->addresses[i].addr[2] = (u8) oui;
@@ -5089,7 +5091,22 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
                nic++;
        }
 
-       wl->hw->wiphy->n_addresses = n;
+       /* we may be one address short at the most */
+       WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
+
+       /*
+        * turn on the LAA bit in the first address and use it as
+        * the last address.
+        */
+       if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
+               int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
+               memcpy(&wl->addresses[idx], &wl->addresses[0],
+                      sizeof(wl->addresses[0]));
+               /* LAA bit */
+               wl->addresses[idx].addr[2] |= BIT(1);
+       }
+
+       wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
        wl->hw->wiphy->addresses = wl->addresses;
 }
 
@@ -5128,8 +5145,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
        if (wl->mac80211_registered)
                return 0;
 
-       wl1271_fetch_nvs(wl);
-       if (wl->nvs != NULL) {
+       if (wl->nvs_len >= 12) {
                /* NOTE: The wl->nvs->nvs element must be first, in
                 * order to simplify the casting, we assume it is at
                 * the beginning of the wl->nvs structure.
@@ -5149,7 +5165,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
                nic_addr = wl->fuse_nic_addr + 1;
        }
 
-       wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
+       wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
 
        ret = ieee80211_register_hw(wl->hw);
        if (ret < 0) {
@@ -5179,7 +5195,7 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
 
 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
        {
-               .max = 2,
+               .max = 3,
                .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
@@ -5194,7 +5210,7 @@ static const struct ieee80211_iface_combination
 wlcore_iface_combinations[] = {
        {
          .num_different_channels = 1,
-         .max_interfaces = 2,
+         .max_interfaces = 3,
          .limits = wlcore_iface_limits,
          .n_limits = ARRAY_SIZE(wlcore_iface_limits),
        },
@@ -5310,7 +5326,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
 {
        struct ieee80211_hw *hw;
        struct wl1271 *wl;
@@ -5390,17 +5406,19 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
 
        spin_lock_init(&wl->wl_lock);
 
-       wl->state = WL1271_STATE_OFF;
+       wl->state = WLCORE_STATE_OFF;
        wl->fw_type = WL12XX_FW_TYPE_NONE;
        mutex_init(&wl->mutex);
        mutex_init(&wl->flush_mutex);
+       init_completion(&wl->nvs_loading_complete);
 
-       order = get_order(WL1271_AGGR_BUFFER_SIZE);
+       order = get_order(aggr_buf_size);
        wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
        if (!wl->aggr_buf) {
                ret = -ENOMEM;
                goto err_wq;
        }
+       wl->aggr_buf_size = aggr_buf_size;
 
        wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
        if (!wl->dummy_packet) {
@@ -5463,8 +5481,7 @@ int wlcore_free_hw(struct wl1271 *wl)
        device_remove_file(wl->dev, &dev_attr_bt_coex_state);
        free_page((unsigned long)wl->fwlog);
        dev_kfree_skb(wl->dummy_packet);
-       free_pages((unsigned long)wl->aggr_buf,
-                       get_order(WL1271_AGGR_BUFFER_SIZE));
+       free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
 
        wl1271_debugfs_exit(wl);
 
@@ -5514,17 +5531,32 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
        return IRQ_WAKE_THREAD;
 }
 
-int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
+static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
+       struct wl1271 *wl = context;
+       struct platform_device *pdev = wl->pdev;
        struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
        unsigned long irqflags;
        int ret;
 
-       if (!wl->ops || !wl->ptable) {
-               ret = -EINVAL;
-               goto out_free_hw;
+       if (fw) {
+               wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               if (!wl->nvs) {
+                       wl1271_error("Could not allocate nvs data");
+                       goto out;
+               }
+               wl->nvs_len = fw->size;
+       } else {
+               wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
+                            WL12XX_NVS_NAME);
+               wl->nvs = NULL;
+               wl->nvs_len = 0;
        }
 
+       ret = wl->ops->setup(wl);
+       if (ret < 0)
+               goto out_free_nvs;
+
        BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
 
        /* adjust some runtime configuration parameters */
@@ -5533,11 +5565,8 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
        wl->irq = platform_get_irq(pdev, 0);
        wl->platform_quirks = pdata->platform_quirks;
        wl->set_power = pdata->set_power;
-       wl->dev = &pdev->dev;
        wl->if_ops = pdata->ops;
 
-       platform_set_drvdata(pdev, wl);
-
        if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
                irqflags = IRQF_TRIGGER_RISING;
        else
@@ -5548,7 +5577,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
                                   pdev->name, wl);
        if (ret < 0) {
                wl1271_error("request_irq() failed: %d", ret);
-               goto out_free_hw;
+               goto out_free_nvs;
        }
 
 #ifdef CONFIG_PM
@@ -5607,6 +5636,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
                goto out_hw_pg_ver;
        }
 
+       wl->initialized = true;
        goto out;
 
 out_hw_pg_ver:
@@ -5621,10 +5651,33 @@ out_unreg:
 out_irq:
        free_irq(wl->irq, wl);
 
-out_free_hw:
-       wlcore_free_hw(wl);
+out_free_nvs:
+       kfree(wl->nvs);
 
 out:
+       release_firmware(fw);
+       complete_all(&wl->nvs_loading_complete);
+}
+
+int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
+{
+       int ret;
+
+       if (!wl->ops || !wl->ptable)
+               return -EINVAL;
+
+       wl->dev = &pdev->dev;
+       wl->pdev = pdev;
+       platform_set_drvdata(pdev, wl);
+
+       ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+                                     WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
+                                     wl, wlcore_nvs_cb);
+       if (ret < 0) {
+               wl1271_error("request_firmware_nowait failed: %d", ret);
+               complete_all(&wl->nvs_loading_complete);
+       }
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -5633,6 +5686,10 @@ int __devexit wlcore_remove(struct platform_device *pdev)
 {
        struct wl1271 *wl = platform_get_drvdata(pdev);
 
+       wait_for_completion(&wl->nvs_loading_complete);
+       if (!wl->initialized)
+               return 0;
+
        if (wl->irq_wake_enabled) {
                device_init_wakeup(wl->dev, 0);
                disable_irq_wake(wl->irq);
@@ -5663,3 +5720,4 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL12XX_NVS_NAME);
index 46d36fd30eba54e306cbf4cdeade28b61e337ea7..4d1414a673fb6025f42d20e4b7da16fcaf2f6265 100644 (file)
@@ -28,7 +28,7 @@
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
-#define ELP_ENTRY_DELAY  5
+#define ELP_ENTRY_DELAY  30
 
 void wl1271_elp_work(struct work_struct *work)
 {
@@ -44,7 +44,7 @@ void wl1271_elp_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* our work might have been already cancelled */
@@ -98,11 +98,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
                        return;
        }
 
-       if (wl->conf.conn.forced_ps)
-               timeout = ELP_ENTRY_DELAY;
-       else
-               timeout = wl->conf.conn.dynamic_ps_timeout;
-
+       timeout = ELP_ENTRY_DELAY;
        ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
                                     msecs_to_jiffies(timeout));
 }
index f55e2f9e7ac56c4212c01cdf0e96b32cf3a7e465..9ee0ec6fd1db3d666769747e2f52c42bcdbe53e1 100644 (file)
@@ -221,7 +221,7 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
                        pkt_len = wlcore_rx_get_buf_size(wl, des);
                        align_pkt_len = wlcore_rx_get_align_buf_size(wl,
                                                                     pkt_len);
-                       if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE)
+                       if (buf_size + align_pkt_len > wl->aggr_buf_size)
                                break;
                        buf_size += align_pkt_len;
                        rx_counter++;
index dbeca1bfbb2cc40814baa73b16d7a9a33c7607ad..d00501493dfec06d9aa314c675e1834f12bb98d2 100644 (file)
@@ -46,7 +46,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -184,11 +184,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
        if (passive)
                scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-       if (wlvif->bss_type == BSS_TYPE_AP_BSS ||
-           test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
-               cmd->params.role_id = wlvif->role_id;
-       else
-               cmd->params.role_id = wlvif->dev_role_id;
+       cmd->params.role_id = wlvif->role_id;
 
        if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
@@ -593,7 +589,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
                goto out;
        }
 
-       cmd->role_id = wlvif->dev_role_id;
+       cmd->role_id = wlvif->role_id;
        if (!n_match_ssids) {
                /* No filter, with ssids */
                type = SCAN_SSID_FILTER_DISABLED;
@@ -683,7 +679,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        if (!cfg)
                return -ENOMEM;
 
-       cfg->role_id = wlvif->dev_role_id;
+       cfg->role_id = wlvif->role_id;
        cfg->rssi_threshold = c->rssi_threshold;
        cfg->snr_threshold  = c->snr_threshold;
        cfg->n_probe_reqs = c->num_probe_reqs;
@@ -718,7 +714,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        if (!force_passive && cfg->active[0]) {
                u8 band = IEEE80211_BAND_2GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                                wlvif->dev_role_id, band,
+                                                wlvif->role_id, band,
                                                 req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[band],
@@ -732,7 +728,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        if (!force_passive && cfg->active[1]) {
                u8 band = IEEE80211_BAND_5GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                                wlvif->dev_role_id, band,
+                                                wlvif->role_id, band,
                                                 req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[band],
@@ -774,7 +770,7 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        if (!start)
                return -ENOMEM;
 
-       start->role_id = wlvif->dev_role_id;
+       start->role_id = wlvif->role_id;
        start->tag = WL1271_SCAN_DEFAULT_TAG;
 
        ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -810,7 +806,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif)
                return;
        }
 
-       stop->role_id = wlvif->dev_role_id;
+       stop->role_id = wlvif->role_id;
        stop->tag = WL1271_SCAN_DEFAULT_TAG;
 
        ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
index 8da4ed243ebcd7ca5b33f7b78d140b21adfb404a..a519bc3adec1d6ade15542ba852918114f5cdb32 100644 (file)
 /* HW limitation: maximum possible chunk size is 4095 bytes */
 #define WSPI_MAX_CHUNK_SIZE    4092
 
-#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
+/*
+ * only support SPI for 12xx - this code should be reworked when 18xx
+ * support is introduced
+ */
+#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+
+#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
 
 struct wl12xx_spi_glue {
        struct device *dev;
@@ -271,7 +277,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
        u32 chunk_len;
        int i;
 
-       WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
+       WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
 
        spi_message_init(&m);
        memset(t, 0, sizeof(t));
index 49e5ee1525c999f04655244e0d8a3bb81a324907..f3442762d884b04def7e2bb2e8b8bee603b3f0ae 100644 (file)
@@ -92,7 +92,7 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EINVAL;
                goto out;
        }
@@ -164,7 +164,7 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EINVAL;
                goto out;
        }
index f0081f746482d8060810d27486c58e50d79df928..a90d3cd094089c82fe60db12dca55bedaa33af0f 100644 (file)
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(wl12xx_is_dummy_packet);
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                        struct sk_buff *skb)
+static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-       struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
-
-       if (control->control.sta) {
+       if (sta) {
                struct wl1271_station *wl_sta;
 
-               wl_sta = (struct wl1271_station *)
-                               control->control.sta->drv_priv;
+               wl_sta = (struct wl1271_station *)sta->drv_priv;
                return wl_sta->hlid;
        } else {
                struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 }
 
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                     struct sk_buff *skb)
+                     struct sk_buff *skb, struct ieee80211_sta *sta)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                return wl->system_hlid;
 
        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
 
        if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
             test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -196,7 +193,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        int id, ret = -EBUSY, ac;
        u32 spare_blocks;
 
-       if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
+       if (buf_offset + total_len > wl->aggr_buf_size)
                return -EAGAIN;
 
        spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
@@ -322,8 +319,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                if (hlid == wlvif->ap.global_hlid)
                        rate_idx = wlvif->ap.mgmt_rate_idx;
                else if (hlid == wlvif->ap.bcast_hlid ||
-                        skb->protocol == cpu_to_be16(ETH_P_PAE))
-                       /* send AP bcast and EAPOLs using the min basic rate */
+                        skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+                        !ieee80211_is_data(frame_control))
+                       /*
+                        * send non-data, bcast and EAPOLs using the
+                        * min basic rate
+                        */
                        rate_idx = wlvif->ap.bcast_rate_idx;
                else
                        rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -344,13 +345,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
 /* caller must hold wl->mutex */
 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                  struct sk_buff *skb, u32 buf_offset)
+                                  struct sk_buff *skb, u32 buf_offset, u8 hlid)
 {
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
        u32 total_len;
-       u8 hlid;
        bool is_dummy;
        bool is_gem = false;
 
@@ -359,9 +359,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                return -EINVAL;
        }
 
+       if (hlid == WL12XX_INVALID_LINK_ID) {
+               wl1271_error("invalid hlid. dropping skb 0x%p", skb);
+               return -EINVAL;
+       }
+
        info = IEEE80211_SKB_CB(skb);
 
-       /* TODO: handle dummy packets on multi-vifs */
        is_dummy = wl12xx_is_dummy_packet(wl, skb);
 
        if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +390,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
                is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
        }
-       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
-       if (hlid == WL12XX_INVALID_LINK_ID) {
-               wl1271_error("invalid hlid. dropping skb 0x%p", skb);
-               return -EINVAL;
-       }
 
        ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
                                 is_gem);
@@ -517,7 +516,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
 }
 
 static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
-                                             struct wl12xx_vif *wlvif)
+                                             struct wl12xx_vif *wlvif,
+                                             u8 *hlid)
 {
        struct sk_buff *skb = NULL;
        int i, h, start_hlid;
@@ -544,10 +544,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
        if (!skb)
                wlvif->last_tx_hlid = 0;
 
+       *hlid = wlvif->last_tx_hlid;
        return skb;
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
 {
        unsigned long flags;
        struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +557,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        /* continue from last wlvif (round robin) */
        if (wlvif) {
                wl12xx_for_each_wlvif_continue(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
@@ -565,13 +566,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        }
 
        /* dequeue from the system HLID before the restarting wlvif list */
-       if (!skb)
+       if (!skb) {
                skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+               *hlid = wl->system_hlid;
+       }
 
        /* do a new pass over the wlvif list */
        if (!skb) {
                wl12xx_for_each_wlvif(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
@@ -591,6 +594,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
                int q;
 
                skb = wl->dummy_packet;
+               *hlid = wl->system_hlid;
                q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
                WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +606,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 }
 
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                 struct sk_buff *skb)
+                                 struct sk_buff *skb, u8 hlid)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +614,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        if (wl12xx_is_dummy_packet(wl, skb)) {
                set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
        } else {
-               u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                /* make sure we dequeue the same packet next time */
@@ -686,26 +689,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
        unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        int ret = 0;
        int bus_ret = 0;
+       u8 hlid;
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                return 0;
 
-       while ((skb = wl1271_skb_dequeue(wl))) {
+       while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                bool has_data = false;
 
                wlvif = NULL;
                if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
                        wlvif = wl12xx_vif_to_data(info->control.vif);
+               else
+                       hlid = wl->system_hlid;
 
                has_data = wlvif && wl1271_tx_is_data_present(skb);
-               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
+               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
+                                             hlid);
                if (ret == -EAGAIN) {
                        /*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
-                       wl1271_skb_queue_head(wl, wlvif, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 
                        buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
                                                            last_len);
@@ -722,7 +729,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
-                       wl1271_skb_queue_head(wl, wlvif, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        /* No work left, avoid scheduling redundant tx work */
                        set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
@@ -732,7 +739,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                                 * fw still expects dummy packet,
                                 * so re-enqueue it
                                 */
-                               wl1271_skb_queue_head(wl, wlvif, skb);
+                               wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        else
                                ieee80211_free_txskb(wl->hw, skb);
                        goto out_ack;
@@ -1069,39 +1076,54 @@ void wl12xx_tx_reset(struct wl1271 *wl)
 /* caller must *NOT* hold wl->mutex */
 void wl1271_tx_flush(struct wl1271 *wl)
 {
-       unsigned long timeout;
+       unsigned long timeout, start_time;
        int i;
-       timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+       start_time = jiffies;
+       timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
 
        /* only one flush should be in progress, for consistent queue state */
        mutex_lock(&wl->flush_mutex);
 
+       mutex_lock(&wl->mutex);
+       if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
+               mutex_unlock(&wl->mutex);
+               goto out;
+       }
+
        wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
 
        while (!time_after(jiffies, timeout)) {
-               mutex_lock(&wl->mutex);
-               wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+               wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
                             wl->tx_frames_cnt,
                             wl1271_tx_total_queue_count(wl));
+
+               /* force Tx and give the driver some time to flush data */
+               mutex_unlock(&wl->mutex);
+               if (wl1271_tx_total_queue_count(wl))
+                       wl1271_tx_work(&wl->tx_work);
+               msleep(20);
+               mutex_lock(&wl->mutex);
+
                if ((wl->tx_frames_cnt == 0) &&
                    (wl1271_tx_total_queue_count(wl) == 0)) {
-                       mutex_unlock(&wl->mutex);
-                       goto out;
+                       wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
+                                    jiffies_to_msecs(jiffies - start_time));
+                       goto out_wake;
                }
-               mutex_unlock(&wl->mutex);
-               msleep(1);
        }
 
-       wl1271_warning("Unable to flush all TX buffers, timed out.");
+       wl1271_warning("Unable to flush all TX buffers, "
+                      "timed out (timeout %d ms",
+                      WL1271_TX_FLUSH_TIMEOUT / 1000);
 
        /* forcibly flush all Tx buffers on our queues */
-       mutex_lock(&wl->mutex);
        for (i = 0; i < WL12XX_MAX_LINKS; i++)
                wl1271_tx_reset_link_queues(wl, i);
-       mutex_unlock(&wl->mutex);
 
-out:
+out_wake:
        wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+       mutex_unlock(&wl->mutex);
+out:
        mutex_unlock(&wl->flush_mutex);
 }
 EXPORT_SYMBOL_GPL(wl1271_tx_flush);
index 1e939b016155c57a45a5a5582b0823f1cbbbea9b..349520d8b7240686b2e7ffa266c4f9019c5ce481 100644 (file)
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
                                enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                        struct sk_buff *skb);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                     struct sk_buff *skb);
+                     struct sk_buff *skb, struct ieee80211_sta *sta);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
index 0ce7a8ebbd46a8be119ed4b84eb146751177b6cf..68584aa0f2b0f731e86b7e1a0c0ea973b3a89ebf 100644 (file)
 /* The maximum number of Tx descriptors in all chip families */
 #define WLCORE_MAX_TX_DESCRIPTORS 32
 
+/*
+ * We always allocate this number of mac addresses. If we don't
+ * have enough allocated addresses, the LAA bit is used
+ */
+#define WLCORE_NUM_MAC_ADDRESSES 3
+
 /* forward declaration */
 struct wl1271_tx_hw_descr;
 enum wl_rx_buf_align;
 struct wl1271_rx_descriptor;
 
 struct wlcore_ops {
+       int (*setup)(struct wl1271 *wl);
        int (*identify_chip)(struct wl1271 *wl);
        int (*identify_fw)(struct wl1271 *wl);
        int (*boot)(struct wl1271 *wl);
@@ -139,10 +146,12 @@ struct wl1271_stats {
 };
 
 struct wl1271 {
+       bool initialized;
        struct ieee80211_hw *hw;
        bool mac80211_registered;
 
        struct device *dev;
+       struct platform_device *pdev;
 
        void *if_priv;
 
@@ -153,7 +162,7 @@ struct wl1271 {
 
        spinlock_t wl_lock;
 
-       enum wl1271_state state;
+       enum wlcore_state state;
        enum wl12xx_fw_type fw_type;
        bool plt;
        enum plt_mode plt_mode;
@@ -181,7 +190,7 @@ struct wl1271 {
        u32 fuse_nic_addr;
 
        /* we have up to 2 MAC addresses */
-       struct mac_address addresses[2];
+       struct mac_address addresses[WLCORE_NUM_MAC_ADDRESSES];
        int channel;
        u8 system_hlid;
 
@@ -190,6 +199,8 @@ struct wl1271 {
        unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
        unsigned long rate_policies_map[
                        BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+       unsigned long klv_templates_map[
+                       BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
 
        struct list_head wlvif_list;
 
@@ -237,6 +248,7 @@ struct wl1271 {
 
        /* Intermediate buffer, used for packet aggregation */
        u8 *aggr_buf;
+       u32 aggr_buf_size;
 
        /* Reusable dummy packet template */
        struct sk_buff *dummy_packet;
@@ -393,13 +405,18 @@ struct wl1271 {
        /* sleep auth value currently configured to FW */
        int sleep_auth;
 
+       /* the number of allocated MAC addresses in this chip */
+       int num_mac_addr;
+
        /* the minimum FW version required for the driver to work */
        unsigned int min_fw_ver[NUM_FW_VER];
+
+       struct completion nvs_loading_complete;
 };
 
 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
 int __devexit wlcore_remove(struct platform_device *pdev);
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
 int wlcore_free_hw(struct wl1271 *wl);
 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                   struct ieee80211_vif *vif,
index c0505635bb00dc42652e363c6e56dd199a2b01f8..6678d4b18611556be7617b6ff79dab0dad851d14 100644 (file)
@@ -66,6 +66,7 @@
 #define WLCORE_NUM_BANDS           2
 
 #define WL12XX_MAX_RATE_POLICIES 16
+#define WLCORE_MAX_KLV_TEMPLATES 4
 
 /* Defined by FW as 0. Will not be freed or allocated. */
 #define WL12XX_SYSTEM_HLID         0
 #define WL1271_AP_BSS_INDEX        0
 #define WL1271_AP_DEF_BEACON_EXP   20
 
-#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
-
-enum wl1271_state {
-       WL1271_STATE_OFF,
-       WL1271_STATE_ON,
+enum wlcore_state {
+       WLCORE_STATE_OFF,
+       WLCORE_STATE_RESTARTING,
+       WLCORE_STATE_ON,
 };
 
 enum wl12xx_fw_type {
@@ -124,6 +124,7 @@ struct wl1271_chip {
        u32 id;
        char fw_ver_str[ETHTOOL_BUSINFO_LEN];
        unsigned int fw_ver[NUM_FW_VER];
+       char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN];
 };
 
 #define NUM_TX_QUEUES              4
@@ -337,6 +338,8 @@ struct wl12xx_vif {
                        u8 ap_rate_idx;
                        u8 p2p_rate_idx;
 
+                       u8 klv_template_id;
+
                        bool qos;
                } sta;
                struct {
index 00f6e69c1dcd6f47e3f1e916cf26f923b9f0e1ea..730186d0449b281b08242a1116c2f85115248f04 100644 (file)
@@ -1520,13 +1520,12 @@ static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
                          union iwreq_data *wrqu, char *extra)
 {
        struct wl3501_card *this = netdev_priv(dev);
-       static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
        int rc = -EINVAL;
 
        /* FIXME: we support other ARPHRDs...*/
        if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
                goto out;
-       if (!memcmp(bcast, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+       if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) {
                /* FIXME: rescan? */
        } else
                memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
index c9e2660e12638d156818367c19cc646064e4aeb3..114364b5d46638f3597e66761c5caff45903cc80 100644 (file)
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
                skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
                if (!skb)
                        break;
-               zd_op_tx(mac->hw, skb);
+               zd_op_tx(mac->hw, NULL, skb);
        }
 
        /*
@@ -1399,7 +1401,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 
        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                    IEEE80211_HW_SIGNAL_UNSPEC |
-                   IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+                   IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+                   IEEE80211_HW_MFP_CAPABLE;
 
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_MESH_POINT) |
index 650f79a1f2bd4a89cd96326d63c85982b24db0aa..c934fe8583f5f17c33a577993a934205db2d3609 100644 (file)
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateConnected:
-               netif_notify_peers(netdev);
+               netdev_notify_peers(netdev);
                break;
 
        case XenbusStateClosing:
index 3b20b73ee649bf46d62cc27f81c7a6f78366a148..ec857676c39ffaffcb6d55c25917ec9beb1192ea 100644 (file)
@@ -5,21 +5,9 @@
 menu "Near Field Communication (NFC) devices"
        depends on NFC
 
-config PN544_NFC
-       tristate "PN544 NFC driver"
-       depends on I2C
-       select CRC_CCITT
-       default n
-       ---help---
-         Say yes if you want PN544 Near Field Communication driver.
-         This is for i2c connected version. If unsure, say N here.
-
-         To compile this driver as a module, choose m here. The module will
-         be called pn544.
-
 config PN544_HCI_NFC
        tristate "HCI PN544 NFC driver"
-       depends on I2C && NFC_SHDLC
+       depends on I2C && NFC_HCI && NFC_SHDLC
        select CRC_CCITT
        default n
        ---help---
index 473e44cef6122fdc4530fbdf9d796e29afc3f85a..bf05831fdf091d372f154c8f7b3ea9e023cdba64 100644 (file)
@@ -2,7 +2,6 @@
 # Makefile for nfc devices
 #
 
-obj-$(CONFIG_PN544_NFC)                += pn544.o
 obj-$(CONFIG_PN544_HCI_NFC)    += pn544_hci.o
 obj-$(CONFIG_NFC_PN533)                += pn533.o
 obj-$(CONFIG_NFC_WILINK)       += nfcwilink.o
index e7fd4938f9bc2e36191de06f38fa30fd807eb307..50b1ee41afc60e2a3789f1a4e26ab12ff811ce4e 100644 (file)
@@ -352,8 +352,6 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
        struct nfcwilink *drv = priv_data;
        int rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
-
        if (!skb)
                return -EFAULT;
 
@@ -362,6 +360,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
                return -EFAULT;
        }
 
+       nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+
        /* strip the ST header
        (apart for the chnl byte, which is not received in the hdr) */
        skb_pull(skb, (NFCWILINK_HDR_LEN-1));
@@ -604,21 +604,7 @@ static struct platform_driver nfcwilink_driver = {
        },
 };
 
-/* ------- Module Init/Exit interfaces ------ */
-static int __init nfcwilink_init(void)
-{
-       printk(KERN_INFO "NFC Driver for TI WiLink");
-
-       return platform_driver_register(&nfcwilink_driver);
-}
-
-static void __exit nfcwilink_exit(void)
-{
-       platform_driver_unregister(&nfcwilink_driver);
-}
-
-module_init(nfcwilink_init);
-module_exit(nfcwilink_exit);
+module_platform_driver(nfcwilink_driver);
 
 /* ------ Module Info ------ */
 
index d606f52fec842d5a613cff3f63719a97827178b4..97c440a8cd615798a1e61250628e0030ede37694 100644 (file)
@@ -356,6 +356,7 @@ struct pn533 {
 
        struct workqueue_struct *wq;
        struct work_struct cmd_work;
+       struct work_struct cmd_complete_work;
        struct work_struct poll_work;
        struct work_struct mi_work;
        struct work_struct tg_work;
@@ -383,6 +384,19 @@ struct pn533 {
        u8 tgt_mode;
 
        u32 device_type;
+
+       struct list_head cmd_queue;
+       u8 cmd_pending;
+};
+
+struct pn533_cmd {
+       struct list_head queue;
+       struct pn533_frame *out_frame;
+       struct pn533_frame *in_frame;
+       int in_frame_len;
+       pn533_cmd_complete_t cmd_complete;
+       void *arg;
+       gfp_t flags;
 };
 
 struct pn533_frame {
@@ -487,7 +501,7 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
 
 static void pn533_wq_cmd_complete(struct work_struct *work)
 {
-       struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+       struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
        struct pn533_frame *in_frame;
        int rc;
 
@@ -502,7 +516,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
                                        PN533_FRAME_CMD_PARAMS_LEN(in_frame));
 
        if (rc != -EINPROGRESS)
-               mutex_unlock(&dev->cmd_lock);
+               queue_work(dev->wq, &dev->cmd_work);
 }
 
 static void pn533_recv_response(struct urb *urb)
@@ -550,7 +564,7 @@ static void pn533_recv_response(struct urb *urb)
        dev->wq_in_frame = in_frame;
 
 sched_wq:
-       queue_work(dev->wq, &dev->cmd_work);
+       queue_work(dev->wq, &dev->cmd_complete_work);
 }
 
 static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -606,7 +620,7 @@ static void pn533_recv_ack(struct urb *urb)
 
 sched_wq:
        dev->wq_in_frame = NULL;
-       queue_work(dev->wq, &dev->cmd_work);
+       queue_work(dev->wq, &dev->cmd_complete_work);
 }
 
 static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -669,6 +683,31 @@ error:
        return rc;
 }
 
+static void pn533_wq_cmd(struct work_struct *work)
+{
+       struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+       struct pn533_cmd *cmd;
+
+       mutex_lock(&dev->cmd_lock);
+
+       if (list_empty(&dev->cmd_queue)) {
+               dev->cmd_pending = 0;
+               mutex_unlock(&dev->cmd_lock);
+               return;
+       }
+
+       cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+
+       mutex_unlock(&dev->cmd_lock);
+
+       __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
+                                    cmd->in_frame_len, cmd->cmd_complete,
+                                    cmd->arg, cmd->flags);
+
+       list_del(&cmd->queue);
+       kfree(cmd);
+}
+
 static int pn533_send_cmd_frame_async(struct pn533 *dev,
                                        struct pn533_frame *out_frame,
                                        struct pn533_frame *in_frame,
@@ -676,21 +715,44 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
                                        pn533_cmd_complete_t cmd_complete,
                                        void *arg, gfp_t flags)
 {
-       int rc;
+       struct pn533_cmd *cmd;
+       int rc = 0;
 
        nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
 
-       if (!mutex_trylock(&dev->cmd_lock))
-               return -EBUSY;
+       mutex_lock(&dev->cmd_lock);
 
-       rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
-                                       in_frame_len, cmd_complete, arg, flags);
-       if (rc)
-               goto error;
+       if (!dev->cmd_pending) {
+               rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
+                                                 in_frame_len, cmd_complete,
+                                                 arg, flags);
+               if (!rc)
+                       dev->cmd_pending = 1;
 
-       return 0;
-error:
+               goto unlock;
+       }
+
+       nfc_dev_dbg(&dev->interface->dev, "%s Queueing command", __func__);
+
+       cmd = kzalloc(sizeof(struct pn533_cmd), flags);
+       if (!cmd) {
+               rc = -ENOMEM;
+               goto unlock;
+       }
+
+       INIT_LIST_HEAD(&cmd->queue);
+       cmd->out_frame = out_frame;
+       cmd->in_frame = in_frame;
+       cmd->in_frame_len = in_frame_len;
+       cmd->cmd_complete = cmd_complete;
+       cmd->arg = arg;
+       cmd->flags = flags;
+
+       list_add_tail(&cmd->queue, &dev->cmd_queue);
+
+unlock:
        mutex_unlock(&dev->cmd_lock);
+
        return rc;
 }
 
@@ -1305,8 +1367,6 @@ static void pn533_listen_mode_timer(unsigned long data)
 
        dev->cancel_listen = 1;
 
-       mutex_unlock(&dev->cmd_lock);
-
        pn533_poll_next_mod(dev);
 
        queue_work(dev->wq, &dev->poll_work);
@@ -2131,7 +2191,7 @@ error_cmd:
 
        kfree(arg);
 
-       mutex_unlock(&dev->cmd_lock);
+       queue_work(dev->wq, &dev->cmd_work);
 }
 
 static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -2330,13 +2390,12 @@ static int pn533_probe(struct usb_interface *interface,
                        NULL, 0,
                        pn533_send_complete, dev);
 
-       INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
+       INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
+       INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
        INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
        INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
        INIT_WORK(&dev->poll_work, pn533_wq_poll);
-       dev->wq = alloc_workqueue("pn533",
-                                 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                                 1);
+       dev->wq = alloc_ordered_workqueue("pn533", 0);
        if (dev->wq == NULL)
                goto error;
 
@@ -2346,6 +2405,8 @@ static int pn533_probe(struct usb_interface *interface,
 
        skb_queue_head_init(&dev->resp_q);
 
+       INIT_LIST_HEAD(&dev->cmd_queue);
+
        usb_set_intfdata(interface, dev);
 
        pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
@@ -2417,6 +2478,7 @@ error:
 static void pn533_disconnect(struct usb_interface *interface)
 {
        struct pn533 *dev;
+       struct pn533_cmd *cmd, *n;
 
        dev = usb_get_intfdata(interface);
        usb_set_intfdata(interface, NULL);
@@ -2433,6 +2495,11 @@ static void pn533_disconnect(struct usb_interface *interface)
 
        del_timer(&dev->listen_timer);
 
+       list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
+               list_del(&cmd->queue);
+               kfree(cmd);
+       }
+
        kfree(dev->in_frame);
        usb_free_urb(dev->in_urb);
        kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
deleted file mode 100644 (file)
index 724f65d..0000000
+++ /dev/null
@@ -1,893 +0,0 @@
-/*
- * Driver for the PN544 NFC chip.
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/completion.h>
-#include <linux/crc-ccitt.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nfc/pn544.h>
-#include <linux/poll.h>
-#include <linux/regulator/consumer.h>
-#include <linux/serial_core.h> /* for TCGETS */
-#include <linux/slab.h>
-
-#define DRIVER_CARD    "PN544 NFC"
-#define DRIVER_DESC    "NFC driver for PN544"
-
-static struct i2c_device_id pn544_id_table[] = {
-       { PN544_DRIVER_NAME, 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(i2c, pn544_id_table);
-
-#define HCI_MODE       0
-#define FW_MODE                1
-
-enum pn544_state {
-       PN544_ST_COLD,
-       PN544_ST_FW_READY,
-       PN544_ST_READY,
-};
-
-enum pn544_irq {
-       PN544_NONE,
-       PN544_INT,
-};
-
-struct pn544_info {
-       struct miscdevice miscdev;
-       struct i2c_client *i2c_dev;
-       struct regulator_bulk_data regs[3];
-
-       enum pn544_state state;
-       wait_queue_head_t read_wait;
-       loff_t read_offset;
-       enum pn544_irq read_irq;
-       struct mutex read_mutex; /* Serialize read_irq access */
-       struct mutex mutex; /* Serialize info struct access */
-       u8 *buf;
-       size_t buflen;
-};
-
-static const char reg_vdd_io[] = "Vdd_IO";
-static const char reg_vbat[]   = "VBat";
-static const char reg_vsim[]   = "VSim";
-
-/* sysfs interface */
-static ssize_t pn544_test(struct device *dev,
-                         struct device_attribute *attr, char *buf)
-{
-       struct pn544_info *info = dev_get_drvdata(dev);
-       struct i2c_client *client = info->i2c_dev;
-       struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
-}
-
-static int pn544_enable(struct pn544_info *info, int mode)
-{
-       struct pn544_nfc_platform_data *pdata;
-       struct i2c_client *client = info->i2c_dev;
-
-       int r;
-
-       r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
-       if (r < 0)
-               return r;
-
-       pdata = client->dev.platform_data;
-       info->read_irq = PN544_NONE;
-       if (pdata->enable)
-               pdata->enable(mode);
-
-       if (mode) {
-               info->state = PN544_ST_FW_READY;
-               dev_dbg(&client->dev, "now in FW-mode\n");
-       } else {
-               info->state = PN544_ST_READY;
-               dev_dbg(&client->dev, "now in HCI-mode\n");
-       }
-
-       usleep_range(10000, 15000);
-
-       return 0;
-}
-
-static void pn544_disable(struct pn544_info *info)
-{
-       struct pn544_nfc_platform_data *pdata;
-       struct i2c_client *client = info->i2c_dev;
-
-       pdata = client->dev.platform_data;
-       if (pdata->disable)
-               pdata->disable();
-
-       info->state = PN544_ST_COLD;
-
-       dev_dbg(&client->dev, "Now in OFF-mode\n");
-
-       msleep(PN544_RESETVEN_TIME);
-
-       info->read_irq = PN544_NONE;
-       regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
-}
-
-static int check_crc(u8 *buf, int buflen)
-{
-       u8 len;
-       u16 crc;
-
-       len = buf[0] + 1;
-       if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
-               pr_err(PN544_DRIVER_NAME
-                      ": CRC; corrupt packet len %u (%d)\n", len, buflen);
-               print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
-                              16, 2, buf, buflen, false);
-               return -EPERM;
-       }
-       crc = crc_ccitt(0xffff, buf, len - 2);
-       crc = ~crc;
-
-       if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
-               pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
-                      crc, buf[len-1], buf[len-2]);
-
-               print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
-                              16, 2, buf, buflen, false);
-               return -EPERM;
-       }
-       return 0;
-}
-
-static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
-{
-       int r;
-
-       if (len < 4 || len != (buf[0] + 1)) {
-               dev_err(&client->dev, "%s: Illegal message length: %d\n",
-                       __func__, len);
-               return -EINVAL;
-       }
-
-       if (check_crc(buf, len))
-               return -EINVAL;
-
-       usleep_range(3000, 6000);
-
-       r = i2c_master_send(client, buf, len);
-       dev_dbg(&client->dev, "send: %d\n", r);
-
-       if (r == -EREMOTEIO) { /* Retry, chip was in standby */
-               usleep_range(6000, 10000);
-               r = i2c_master_send(client, buf, len);
-               dev_dbg(&client->dev, "send2: %d\n", r);
-       }
-
-       if (r != len)
-               return -EREMOTEIO;
-
-       return r;
-}
-
-static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
-{
-       int r;
-       u8 len;
-
-       /*
-        * You could read a packet in one go, but then you'd need to read
-        * max size and rest would be 0xff fill, so we do split reads.
-        */
-       r = i2c_master_recv(client, &len, 1);
-       dev_dbg(&client->dev, "recv1: %d\n", r);
-
-       if (r != 1)
-               return -EREMOTEIO;
-
-       if (len < PN544_LLC_HCI_OVERHEAD)
-               len = PN544_LLC_HCI_OVERHEAD;
-       else if (len > (PN544_MSG_MAX_SIZE - 1))
-               len = PN544_MSG_MAX_SIZE - 1;
-
-       if (1 + len > buflen) /* len+(data+crc16) */
-               return -EMSGSIZE;
-
-       buf[0] = len;
-
-       r = i2c_master_recv(client, buf + 1, len);
-       dev_dbg(&client->dev, "recv2: %d\n", r);
-
-       if (r != len)
-               return -EREMOTEIO;
-
-       usleep_range(3000, 6000);
-
-       return r + 1;
-}
-
-static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
-{
-       int r;
-
-       dev_dbg(&client->dev, "%s\n", __func__);
-
-       if (len < PN544_FW_HEADER_SIZE ||
-           (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
-               return -EINVAL;
-
-       r = i2c_master_send(client, buf, len);
-       dev_dbg(&client->dev, "fw send: %d\n", r);
-
-       if (r == -EREMOTEIO) { /* Retry, chip was in standby */
-               usleep_range(6000, 10000);
-               r = i2c_master_send(client, buf, len);
-               dev_dbg(&client->dev, "fw send2: %d\n", r);
-       }
-
-       if (r != len)
-               return -EREMOTEIO;
-
-       return r;
-}
-
-static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
-{
-       int r, len;
-
-       if (buflen < PN544_FW_HEADER_SIZE)
-               return -EINVAL;
-
-       r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
-       dev_dbg(&client->dev, "FW recv1: %d\n", r);
-
-       if (r < 0)
-               return r;
-
-       if (r < PN544_FW_HEADER_SIZE)
-               return -EINVAL;
-
-       len = (buf[1] << 8) + buf[2];
-       if (len == 0) /* just header, no additional data */
-               return r;
-
-       if (len > buflen - PN544_FW_HEADER_SIZE)
-               return -EMSGSIZE;
-
-       r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
-       dev_dbg(&client->dev, "fw recv2: %d\n", r);
-
-       if (r != len)
-               return -EINVAL;
-
-       return r + PN544_FW_HEADER_SIZE;
-}
-
-static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
-{
-       struct pn544_info *info = dev_id;
-       struct i2c_client *client = info->i2c_dev;
-
-       BUG_ON(!info);
-       BUG_ON(irq != info->i2c_dev->irq);
-
-       dev_dbg(&client->dev, "IRQ\n");
-
-       mutex_lock(&info->read_mutex);
-       info->read_irq = PN544_INT;
-       mutex_unlock(&info->read_mutex);
-
-       wake_up_interruptible(&info->read_wait);
-
-       return IRQ_HANDLED;
-}
-
-static enum pn544_irq pn544_irq_state(struct pn544_info *info)
-{
-       enum pn544_irq irq;
-
-       mutex_lock(&info->read_mutex);
-       irq = info->read_irq;
-       mutex_unlock(&info->read_mutex);
-       /*
-        * XXX: should we check GPIO-line status directly?
-        * return pdata->irq_status() ? PN544_INT : PN544_NONE;
-        */
-
-       return irq;
-}
-
-static ssize_t pn544_read(struct file *file, char __user *buf,
-                         size_t count, loff_t *offset)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       enum pn544_irq irq;
-       size_t len;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
-               info, count);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       irq = pn544_irq_state(info);
-       if (irq == PN544_NONE) {
-               if (file->f_flags & O_NONBLOCK) {
-                       r = -EAGAIN;
-                       goto out;
-               }
-
-               if (wait_event_interruptible(info->read_wait,
-                                            (info->read_irq == PN544_INT))) {
-                       r = -ERESTARTSYS;
-                       goto out;
-               }
-       }
-
-       if (info->state == PN544_ST_FW_READY) {
-               len = min(count, info->buflen);
-
-               mutex_lock(&info->read_mutex);
-               r = pn544_fw_read(info->i2c_dev, info->buf, len);
-               info->read_irq = PN544_NONE;
-               mutex_unlock(&info->read_mutex);
-
-               if (r < 0) {
-                       dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
-                       goto out;
-               }
-
-               print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, r, false);
-
-               *offset += r;
-               if (copy_to_user(buf, info->buf, r)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-       } else {
-               len = min(count, info->buflen);
-
-               mutex_lock(&info->read_mutex);
-               r = pn544_i2c_read(info->i2c_dev, info->buf, len);
-               info->read_irq = PN544_NONE;
-               mutex_unlock(&info->read_mutex);
-
-               if (r < 0) {
-                       dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
-                       goto out;
-               }
-               print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, r, false);
-
-               *offset += r;
-               if (copy_to_user(buf, info->buf, r)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-       }
-
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static unsigned int pn544_poll(struct file *file, poll_table *wait)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       poll_wait(file, &info->read_wait, wait);
-
-       if (pn544_irq_state(info) == PN544_INT) {
-               r = POLLIN | POLLRDNORM;
-               goto out;
-       }
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static ssize_t pn544_write(struct file *file, const char __user *buf,
-                          size_t count, loff_t *ppos)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       ssize_t len;
-       int r;
-
-       dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
-               info, count);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       /*
-        * XXX: should we detect rset-writes and clean possible
-        * read_irq state
-        */
-       if (info->state == PN544_ST_FW_READY) {
-               size_t fw_len;
-
-               if (count < PN544_FW_HEADER_SIZE) {
-                       r = -EINVAL;
-                       goto out;
-               }
-
-               len = min(count, info->buflen);
-               if (copy_from_user(info->buf, buf, len)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, len, false);
-
-               fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
-                       info->buf[2];
-
-               if (len > fw_len) /* 1 msg at a time */
-                       len = fw_len;
-
-               r = pn544_fw_write(info->i2c_dev, info->buf, len);
-       } else {
-               if (count < PN544_LLC_MIN_SIZE) {
-                       r = -EINVAL;
-                       goto out;
-               }
-
-               len = min(count, info->buflen);
-               if (copy_from_user(info->buf, buf, len)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, len, false);
-
-               if (len > (info->buf[0] + 1)) /* 1 msg at a time */
-                       len  = info->buf[0] + 1;
-
-               r = pn544_i2c_write(info->i2c_dev, info->buf, len);
-       }
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-
-}
-
-static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       struct pn544_nfc_platform_data *pdata;
-       unsigned int val;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       pdata = info->i2c_dev->dev.platform_data;
-       switch (cmd) {
-       case PN544_GET_FW_MODE:
-               dev_dbg(&client->dev, "%s:  PN544_GET_FW_MODE\n", __func__);
-
-               val = (info->state == PN544_ST_FW_READY);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               break;
-
-       case PN544_SET_FW_MODE:
-               dev_dbg(&client->dev, "%s:  PN544_SET_FW_MODE\n", __func__);
-
-               if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               if (val) {
-                       if (info->state == PN544_ST_FW_READY)
-                               break;
-
-                       pn544_disable(info);
-                       r = pn544_enable(info, FW_MODE);
-                       if (r < 0)
-                               goto out;
-               } else {
-                       if (info->state == PN544_ST_READY)
-                               break;
-                       pn544_disable(info);
-                       r = pn544_enable(info, HCI_MODE);
-                       if (r < 0)
-                               goto out;
-               }
-               file->f_pos = info->read_offset;
-               break;
-
-       case TCGETS:
-               dev_dbg(&client->dev, "%s:  TCGETS\n", __func__);
-
-               r = -ENOIOCTLCMD;
-               break;
-
-       default:
-               dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
-               r = -ENOIOCTLCMD;
-               break;
-       }
-
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static int pn544_open(struct inode *inode, struct file *file)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
-               info, info->i2c_dev);
-
-       mutex_lock(&info->mutex);
-
-       /*
-        * Only 1 at a time.
-        * XXX: maybe user (counter) would work better
-        */
-       if (info->state != PN544_ST_COLD) {
-               r = -EBUSY;
-               goto out;
-       }
-
-       file->f_pos = info->read_offset;
-       r = pn544_enable(info, HCI_MODE);
-
-out:
-       mutex_unlock(&info->mutex);
-       return r;
-}
-
-static int pn544_close(struct inode *inode, struct file *file)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-
-       dev_dbg(&client->dev, "%s: info: %p, client %p\n",
-               __func__, info, info->i2c_dev);
-
-       mutex_lock(&info->mutex);
-       pn544_disable(info);
-       mutex_unlock(&info->mutex);
-
-       return 0;
-}
-
-static const struct file_operations pn544_fops = {
-       .owner          = THIS_MODULE,
-       .llseek         = no_llseek,
-       .read           = pn544_read,
-       .write          = pn544_write,
-       .poll           = pn544_poll,
-       .open           = pn544_open,
-       .release        = pn544_close,
-       .unlocked_ioctl = pn544_ioctl,
-};
-
-#ifdef CONFIG_PM
-static int pn544_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct pn544_info *info;
-       int r = 0;
-
-       dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
-
-       info = i2c_get_clientdata(client);
-       dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
-                info, client);
-
-       mutex_lock(&info->mutex);
-
-       switch (info->state) {
-       case PN544_ST_FW_READY:
-               /* Do not suspend while upgrading FW, please! */
-               r = -EPERM;
-               break;
-
-       case PN544_ST_READY:
-               /*
-                * CHECK: Device should be in standby-mode. No way to check?
-                * Allowing low power mode for the regulator is potentially
-                * dangerous if pn544 does not go to suspension.
-                */
-               break;
-
-       case PN544_ST_COLD:
-               break;
-       };
-
-       mutex_unlock(&info->mutex);
-       return r;
-}
-
-static int pn544_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct pn544_info *info = i2c_get_clientdata(client);
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
-               info, client);
-
-       mutex_lock(&info->mutex);
-
-       switch (info->state) {
-       case PN544_ST_READY:
-               /*
-                * CHECK: If regulator low power mode is allowed in
-                * pn544_suspend, we should go back to normal mode
-                * here.
-                */
-               break;
-
-       case PN544_ST_COLD:
-               break;
-
-       case PN544_ST_FW_READY:
-               break;
-       };
-
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
-#endif
-
-static struct device_attribute pn544_attr =
-       __ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
-
-static int __devinit pn544_probe(struct i2c_client *client,
-                                const struct i2c_device_id *id)
-{
-       struct pn544_info *info;
-       struct pn544_nfc_platform_data *pdata;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s\n", __func__);
-       dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
-       /* private data allocation */
-       info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
-       if (!info) {
-               dev_err(&client->dev,
-                       "Cannot allocate memory for pn544_info.\n");
-               r = -ENOMEM;
-               goto err_info_alloc;
-       }
-
-       info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
-       info->buf = kzalloc(info->buflen, GFP_KERNEL);
-       if (!info->buf) {
-               dev_err(&client->dev,
-                       "Cannot allocate memory for pn544_info->buf.\n");
-               r = -ENOMEM;
-               goto err_buf_alloc;
-       }
-
-       info->regs[0].supply = reg_vdd_io;
-       info->regs[1].supply = reg_vbat;
-       info->regs[2].supply = reg_vsim;
-       r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
-                                info->regs);
-       if (r < 0)
-               goto err_kmalloc;
-
-       info->i2c_dev = client;
-       info->state = PN544_ST_COLD;
-       info->read_irq = PN544_NONE;
-       mutex_init(&info->read_mutex);
-       mutex_init(&info->mutex);
-       init_waitqueue_head(&info->read_wait);
-       i2c_set_clientdata(client, info);
-       pdata = client->dev.platform_data;
-       if (!pdata) {
-               dev_err(&client->dev, "No platform data\n");
-               r = -EINVAL;
-               goto err_reg;
-       }
-
-       if (!pdata->request_resources) {
-               dev_err(&client->dev, "request_resources() missing\n");
-               r = -EINVAL;
-               goto err_reg;
-       }
-
-       r = pdata->request_resources(client);
-       if (r) {
-               dev_err(&client->dev, "Cannot get platform resources\n");
-               goto err_reg;
-       }
-
-       r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
-                                IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
-                                info);
-       if (r < 0) {
-               dev_err(&client->dev, "Unable to register IRQ handler\n");
-               goto err_res;
-       }
-
-       /* If we don't have the test we don't need the sysfs file */
-       if (pdata->test) {
-               r = device_create_file(&client->dev, &pn544_attr);
-               if (r) {
-                       dev_err(&client->dev,
-                               "sysfs registration failed, error %d\n", r);
-                       goto err_irq;
-               }
-       }
-
-       info->miscdev.minor = MISC_DYNAMIC_MINOR;
-       info->miscdev.name = PN544_DRIVER_NAME;
-       info->miscdev.fops = &pn544_fops;
-       info->miscdev.parent = &client->dev;
-       r = misc_register(&info->miscdev);
-       if (r < 0) {
-               dev_err(&client->dev, "Device registration failed\n");
-               goto err_sysfs;
-       }
-
-       dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
-               __func__, info, pdata, client);
-
-       return 0;
-
-err_sysfs:
-       if (pdata->test)
-               device_remove_file(&client->dev, &pn544_attr);
-err_irq:
-       free_irq(client->irq, info);
-err_res:
-       if (pdata->free_resources)
-               pdata->free_resources();
-err_reg:
-       regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
-err_kmalloc:
-       kfree(info->buf);
-err_buf_alloc:
-       kfree(info);
-err_info_alloc:
-       return r;
-}
-
-static __devexit int pn544_remove(struct i2c_client *client)
-{
-       struct pn544_info *info = i2c_get_clientdata(client);
-       struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
-       dev_dbg(&client->dev, "%s\n", __func__);
-
-       misc_deregister(&info->miscdev);
-       if (pdata->test)
-               device_remove_file(&client->dev, &pn544_attr);
-
-       if (info->state != PN544_ST_COLD) {
-               if (pdata->disable)
-                       pdata->disable();
-
-               info->read_irq = PN544_NONE;
-       }
-
-       free_irq(client->irq, info);
-       if (pdata->free_resources)
-               pdata->free_resources();
-
-       regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
-       kfree(info->buf);
-       kfree(info);
-
-       return 0;
-}
-
-static struct i2c_driver pn544_driver = {
-       .driver = {
-               .name = PN544_DRIVER_NAME,
-#ifdef CONFIG_PM
-               .pm = &pn544_pm_ops,
-#endif
-       },
-       .probe = pn544_probe,
-       .id_table = pn544_id_table,
-       .remove = __devexit_p(pn544_remove),
-};
-
-static int __init pn544_init(void)
-{
-       int r;
-
-       pr_debug(DRIVER_DESC ": %s\n", __func__);
-
-       r = i2c_add_driver(&pn544_driver);
-       if (r) {
-               pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
-               return r;
-       }
-
-       return 0;
-}
-
-static void __exit pn544_exit(void)
-{
-       i2c_del_driver(&pn544_driver);
-       pr_info(DRIVER_DESC ", Exiting.\n");
-}
-
-module_init(pn544_init);
-module_exit(pn544_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
index aa71807189ba3235a0720fc1660049507bf19039..c9c8570273ab5a58c782e9fe953339e091042d2a 100644 (file)
@@ -29,7 +29,7 @@
 
 #include <linux/nfc.h>
 #include <net/nfc/hci.h>
-#include <net/nfc/shdlc.h>
+#include <net/nfc/llc.h>
 
 #include <linux/nfc/pn544.h>
 
@@ -128,10 +128,12 @@ static struct nfc_hci_gate pn544_gates[] = {
 
 /* Largest headroom needed for outgoing custom commands */
 #define PN544_CMDS_HEADROOM    2
+#define PN544_FRAME_HEADROOM 1
+#define PN544_FRAME_TAILROOM 2
 
 struct pn544_hci_info {
        struct i2c_client *i2c_dev;
-       struct nfc_shdlc *shdlc;
+       struct nfc_hci_dev *hdev;
 
        enum pn544_state state;
 
@@ -146,6 +148,9 @@ struct pn544_hci_info {
                                 * < 0 if hardware error occured (e.g. i2c err)
                                 * and prevents normal operation.
                                 */
+       int async_cb_type;
+       data_exchange_cb_t async_cb;
+       void *async_cb_context;
 };
 
 static void pn544_hci_platform_init(struct pn544_hci_info *info)
@@ -230,8 +235,12 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
                r = i2c_master_send(client, buf, len);
        }
 
-       if (r >= 0 && r != len)
-               r = -EREMOTEIO;
+       if (r >= 0) {
+               if (r != len)
+                       return -EREMOTEIO;
+               else
+                       return 0;
+       }
 
        return r;
 }
@@ -341,13 +350,16 @@ flush:
 static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
 {
        struct pn544_hci_info *info = dev_id;
-       struct i2c_client *client = info->i2c_dev;
+       struct i2c_client *client;
        struct sk_buff *skb = NULL;
        int r;
 
-       BUG_ON(!info);
-       BUG_ON(irq != info->i2c_dev->irq);
+       if (!info || irq != info->i2c_dev->irq) {
+               WARN_ON_ONCE(1);
+               return IRQ_NONE;
+       }
 
+       client = info->i2c_dev;
        dev_dbg(&client->dev, "IRQ\n");
 
        if (info->hard_fault != 0)
@@ -357,21 +369,21 @@ static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
        if (r == -EREMOTEIO) {
                info->hard_fault = r;
 
-               nfc_shdlc_recv_frame(info->shdlc, NULL);
+               nfc_hci_recv_frame(info->hdev, NULL);
 
                return IRQ_HANDLED;
        } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
                return IRQ_HANDLED;
        }
 
-       nfc_shdlc_recv_frame(info->shdlc, skb);
+       nfc_hci_recv_frame(info->hdev, skb);
 
        return IRQ_HANDLED;
 }
 
-static int pn544_hci_open(struct nfc_shdlc *shdlc)
+static int pn544_hci_open(struct nfc_hci_dev *hdev)
 {
-       struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
        int r = 0;
 
        mutex_lock(&info->info_lock);
@@ -391,9 +403,9 @@ out:
        return r;
 }
 
-static void pn544_hci_close(struct nfc_shdlc *shdlc)
+static void pn544_hci_close(struct nfc_hci_dev *hdev)
 {
-       struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
 
        mutex_lock(&info->info_lock);
 
@@ -408,9 +420,8 @@ out:
        mutex_unlock(&info->info_lock);
 }
 
-static int pn544_hci_ready(struct nfc_shdlc *shdlc)
+static int pn544_hci_ready(struct nfc_hci_dev *hdev)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
        struct sk_buff *skb;
        static struct hw_config {
                u8 adr[2];
@@ -576,21 +587,45 @@ static int pn544_hci_ready(struct nfc_shdlc *shdlc)
        return 0;
 }
 
-static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+static void pn544_hci_add_len_crc(struct sk_buff *skb)
 {
-       struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+       u16 crc;
+       int len;
+
+       len = skb->len + 2;
+       *skb_push(skb, 1) = len;
+
+       crc = crc_ccitt(0xffff, skb->data, skb->len);
+       crc = ~crc;
+       *skb_put(skb, 1) = crc & 0xff;
+       *skb_put(skb, 1) = crc >> 8;
+}
+
+static void pn544_hci_remove_len_crc(struct sk_buff *skb)
+{
+       skb_pull(skb, PN544_FRAME_HEADROOM);
+       skb_trim(skb, PN544_FRAME_TAILROOM);
+}
+
+static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
        struct i2c_client *client = info->i2c_dev;
+       int r;
 
        if (info->hard_fault != 0)
                return info->hard_fault;
 
-       return pn544_hci_i2c_write(client, skb->data, skb->len);
+       pn544_hci_add_len_crc(skb);
+       r = pn544_hci_i2c_write(client, skb->data, skb->len);
+       pn544_hci_remove_len_crc(skb);
+
+       return r;
 }
 
-static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
+static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
                                u32 im_protocols, u32 tm_protocols)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
        u8 phases = 0;
        int r;
        u8 duration[2];
@@ -641,7 +676,7 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
        return r;
 }
 
-static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
+static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
                                      struct nfc_target *target)
 {
        switch (gate) {
@@ -659,11 +694,10 @@ static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
        return 0;
 }
 
-static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
+static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                                                u8 gate,
                                                struct nfc_target *target)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
        struct sk_buff *uid_skb;
        int r = 0;
 
@@ -704,6 +738,26 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
        return r;
 }
 
+#define PN544_CB_TYPE_READER_F 1
+
+static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
+                                      int err)
+{
+       struct pn544_hci_info *info = context;
+
+       switch (info->async_cb_type) {
+       case PN544_CB_TYPE_READER_F:
+               if (err == 0)
+                       skb_pull(skb, 1);
+               info->async_cb(info->async_cb_context, skb, err);
+               break;
+       default:
+               if (err == 0)
+                       kfree_skb(skb);
+               break;
+       }
+}
+
 #define MIFARE_CMD_AUTH_KEY_A  0x60
 #define MIFARE_CMD_AUTH_KEY_B  0x61
 #define MIFARE_CMD_HEADER      2
@@ -715,13 +769,12 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
  * <= 0: driver handled the data exchange
  *    1: driver doesn't especially handle, please do standard processing
  */
-static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
+static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
                                   struct nfc_target *target,
-                                  struct sk_buff *skb,
-                                  struct sk_buff **res_skb)
+                                  struct sk_buff *skb, data_exchange_cb_t cb,
+                                  void *cb_context)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
-       int r;
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
 
        pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
                target->hci_reader_gate);
@@ -746,41 +799,43 @@ static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
                                memcpy(data, uid, MIFARE_UID_LEN);
                        }
 
-                       return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                               PN544_MIFARE_CMD,
-                                               skb->data, skb->len, res_skb);
+                       return nfc_hci_send_cmd_async(hdev,
+                                                     target->hci_reader_gate,
+                                                     PN544_MIFARE_CMD,
+                                                     skb->data, skb->len,
+                                                     cb, cb_context);
                } else
                        return 1;
        case PN544_RF_READER_F_GATE:
                *skb_push(skb, 1) = 0;
                *skb_push(skb, 1) = 0;
 
-               r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                    PN544_FELICA_RAW,
-                                    skb->data, skb->len, res_skb);
-               if (r == 0)
-                       skb_pull(*res_skb, 1);
-               return r;
+               info->async_cb_type = PN544_CB_TYPE_READER_F;
+               info->async_cb = cb;
+               info->async_cb_context = cb_context;
+
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             PN544_FELICA_RAW, skb->data,
+                                             skb->len,
+                                             pn544_hci_data_exchange_cb, info);
        case PN544_RF_READER_JEWEL_GATE:
-               return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                       PN544_JEWEL_RAW_CMD,
-                                       skb->data, skb->len, res_skb);
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             PN544_JEWEL_RAW_CMD, skb->data,
+                                             skb->len, cb, cb_context);
        default:
                return 1;
        }
 }
 
-static int pn544_hci_check_presence(struct nfc_shdlc *shdlc,
+static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
                                   struct nfc_target *target)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
-
        return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
                                PN544_RF_READER_CMD_PRESENCE_CHECK,
                                NULL, 0, NULL);
 }
 
-static struct nfc_shdlc_ops pn544_shdlc_ops = {
+static struct nfc_hci_ops pn544_hci_ops = {
        .open = pn544_hci_open,
        .close = pn544_hci_close,
        .hci_ready = pn544_hci_ready,
@@ -848,8 +903,8 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
        pn544_hci_platform_init(info);
 
        r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
-                                IRQF_TRIGGER_RISING, PN544_HCI_DRIVER_NAME,
-                                info);
+                                IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                PN544_HCI_DRIVER_NAME, info);
        if (r < 0) {
                dev_err(&client->dev, "Unable to register IRQ handler\n");
                goto err_rti;
@@ -872,22 +927,30 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
                    NFC_PROTO_ISO14443_B_MASK |
                    NFC_PROTO_NFC_DEP_MASK;
 
-       info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
-                                        &init_data, protocols,
-                                        PN544_CMDS_HEADROOM, 0,
-                                        PN544_HCI_LLC_MAX_PAYLOAD,
-                                        dev_name(&client->dev));
-       if (!info->shdlc) {
-               dev_err(&client->dev, "Cannot allocate nfc shdlc.\n");
+       info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
+                                            protocols, LLC_SHDLC_NAME,
+                                            PN544_FRAME_HEADROOM +
+                                            PN544_CMDS_HEADROOM,
+                                            PN544_FRAME_TAILROOM,
+                                            PN544_HCI_LLC_MAX_PAYLOAD);
+       if (!info->hdev) {
+               dev_err(&client->dev, "Cannot allocate nfc hdev.\n");
                r = -ENOMEM;
-               goto err_allocshdlc;
+               goto err_alloc_hdev;
        }
 
-       nfc_shdlc_set_clientdata(info->shdlc, info);
+       nfc_hci_set_clientdata(info->hdev, info);
+
+       r = nfc_hci_register_device(info->hdev);
+       if (r)
+               goto err_regdev;
 
        return 0;
 
-err_allocshdlc:
+err_regdev:
+       nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
        free_irq(client->irq, info);
 
 err_rti:
@@ -908,7 +971,7 @@ static __devexit int pn544_hci_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "%s\n", __func__);
 
-       nfc_shdlc_free(info->shdlc);
+       nfc_hci_free_device(info->hdev);
 
        if (info->state != PN544_ST_COLD) {
                if (pdata->disable)
index 1e528b539a07f202a04b9e9dbdfbffbac6656703..79f4bce061bd289d5f01a35601ad151f73ee8d80 100644 (file)
@@ -143,10 +143,12 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
                kt = timespec_to_ktime(ts);
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
-
        } else if (tx->modes & ADJ_FREQUENCY) {
-
                err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
+               ptp->dialed_frequency = tx->freq;
+       } else if (tx->modes == 0) {
+               tx->freq = ptp->dialed_frequency;
+               err = 0;
        }
 
        return err;
@@ -180,7 +182,8 @@ static void delete_ptp_clock(struct posix_clock *pc)
 
 /* public interface */
 
-struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
+struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+                                    struct device *parent)
 {
        struct ptp_clock *ptp;
        int err = 0, index, major = MAJOR(ptp_devt);
@@ -213,7 +216,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
        init_waitqueue_head(&ptp->tsev_wq);
 
        /* Create a new device in our class. */
-       ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp,
+       ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
                                 "ptp%d", ptp->index);
        if (IS_ERR(ptp->dev))
                goto no_device;
@@ -300,6 +303,11 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
                pps_get_ts(&evt);
                pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
                break;
+
+       case PTP_CLOCK_PPSUSR:
+               pps_event(ptp->pps_source, &event->pps_times,
+                         PTP_PPS_EVENT, NULL);
+               break;
        }
 }
 EXPORT_SYMBOL(ptp_clock_event);
index e03c40692b0073106f845f9adbb1d38250590e76..d49b85164fd22e830f51e066337801134eef9354 100644 (file)
@@ -298,7 +298,7 @@ static int __init ptp_ixp_init(void)
 
        ixp_clock.caps = ptp_ixp_caps;
 
-       ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps);
+       ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps, NULL);
 
        if (IS_ERR(ixp_clock.ptp_clock))
                return PTR_ERR(ixp_clock.ptp_clock);
index 3a9c17eced10c3a34d591fd465db555ec9498b88..e624e4dd2abb001444c08dc0593800ddab9f8da0 100644 (file)
@@ -627,7 +627,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        chip->caps = ptp_pch_caps;
-       chip->ptp_clock = ptp_clock_register(&chip->caps);
+       chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
 
        if (IS_ERR(chip->ptp_clock))
                return PTR_ERR(chip->ptp_clock);
index 4d5b5082c3b19d4766be52dbed3904aa5f4cd6c9..69d32070cc654526c4391229f89140e8eff35303 100644 (file)
@@ -45,6 +45,7 @@ struct ptp_clock {
        dev_t devid;
        int index; /* index into clocks.map */
        struct pps_device *pps_source;
+       long dialed_frequency; /* remembers the frequency adjustment */
        struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
        struct mutex tsevq_mux; /* one process at a time reading the fifo */
        wait_queue_head_t tsev_wq;
index d4ade9e92fbbd4fa97f9ce1acf4043a7b91b4f65..fb92524d24ef9ac1e72a6bf0912a5092e15433fd 100644 (file)
@@ -1523,7 +1523,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
                                goto done;
        default:
                break;
-       };
+       }
 
        fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
index 5227e5734a9d152e45a2b4920a13f92b7354051e..98ea9cc6f1aaba9f442ecb4b46724c0f3f5ccd7b 100644 (file)
@@ -1454,7 +1454,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
                                ch_fsm_len, GFP_KERNEL);
        }
        if (ch->fsm == NULL)
-                               goto free_return;
+                               goto nomem_return;
 
        fsm_newstate(ch->fsm, CTC_STATE_IDLE);
 
index a3adf4b1c60d7e2e5efc074ce4fb6b2e7a6276be..2ca0f1dd7a00b857211348cf28d2c9e4a9df161a 100644 (file)
@@ -282,7 +282,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
 
        LCS_DBF_TEXT(3, setup, "iwritccw");
        /* Setup write ccws. */
-       memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1);
+       memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
        for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
                card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
                card->write.ccws[cnt].count = 0;
index cf6da7fafe54d5996bff33ec38b0976e046d0a0c..3e25d31504560a79b75b782a45153190a7694ffa 100644 (file)
@@ -489,7 +489,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
                atomic_set(&reply->refcnt, 1);
                atomic_set(&reply->received, 0);
                reply->card = card;
-       };
+       }
        return reply;
 }
 
@@ -1257,7 +1257,30 @@ static void qeth_clean_channel(struct qeth_channel *channel)
                kfree(channel->iob[cnt].data);
 }
 
-static void qeth_get_channel_path_desc(struct qeth_card *card)
+static void qeth_set_single_write_queues(struct qeth_card *card)
+{
+       if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
+           (card->qdio.no_out_queues == 4))
+               qeth_free_qdio_buffers(card);
+
+       card->qdio.no_out_queues = 1;
+       if (card->qdio.default_out_queue != 0)
+               dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+       card->qdio.default_out_queue = 0;
+}
+
+static void qeth_set_multiple_write_queues(struct qeth_card *card)
+{
+       if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
+           (card->qdio.no_out_queues == 1)) {
+               qeth_free_qdio_buffers(card);
+               card->qdio.default_out_queue = 2;
+       }
+       card->qdio.no_out_queues = 4;
+}
+
+static void qeth_update_from_chp_desc(struct qeth_card *card)
 {
        struct ccw_device *ccwdev;
        struct channelPath_dsc {
@@ -1274,38 +1297,23 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
        QETH_DBF_TEXT(SETUP, 2, "chp_desc");
 
        ccwdev = card->data.ccwdev;
-       chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
-       if (chp_dsc != NULL) {
-               if (card->info.type != QETH_CARD_TYPE_IQD) {
-                       /* CHPP field bit 6 == 1 -> single queue */
-                       if ((chp_dsc->chpp & 0x02) == 0x02) {
-                               if ((atomic_read(&card->qdio.state) !=
-                                       QETH_QDIO_UNINITIALIZED) &&
-                                   (card->qdio.no_out_queues == 4))
-                                       /* change from 4 to 1 outbound queues */
-                                       qeth_free_qdio_buffers(card);
-                               card->qdio.no_out_queues = 1;
-                               if (card->qdio.default_out_queue != 0)
-                                       dev_info(&card->gdev->dev,
-                                       "Priority Queueing not supported\n");
-                               card->qdio.default_out_queue = 0;
-                       } else {
-                               if ((atomic_read(&card->qdio.state) !=
-                                       QETH_QDIO_UNINITIALIZED) &&
-                                   (card->qdio.no_out_queues == 1)) {
-                                       /* change from 1 to 4 outbound queues */
-                                       qeth_free_qdio_buffers(card);
-                                       card->qdio.default_out_queue = 2;
-                               }
-                               card->qdio.no_out_queues = 4;
-                       }
-               }
-               card->info.func_level = 0x4100 + chp_dsc->desc;
-               kfree(chp_dsc);
-       }
+       chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
+       if (!chp_dsc)
+               goto out;
+
+       card->info.func_level = 0x4100 + chp_dsc->desc;
+       if (card->info.type == QETH_CARD_TYPE_IQD)
+               goto out;
+
+       /* CHPP field bit 6 == 1 -> single queue */
+       if ((chp_dsc->chpp & 0x02) == 0x02)
+               qeth_set_single_write_queues(card);
+       else
+               qeth_set_multiple_write_queues(card);
+out:
+       kfree(chp_dsc);
        QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
        QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
-       return;
 }
 
 static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1473,7 +1481,7 @@ static int qeth_determine_card_type(struct qeth_card *card)
                        card->qdio.no_in_queues = 1;
                        card->info.is_multicast_different =
                                known_devices[i][QETH_MULTICAST_IND];
-                       qeth_get_channel_path_desc(card);
+                       qeth_update_from_chp_desc(card);
                        return 0;
                }
                i++;
@@ -2029,7 +2037,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                        if (time_after(jiffies, timeout))
                                goto time_err;
                        cpu_relax();
-               };
+               }
        }
 
        if (reply->rc == -EIO)
@@ -4735,7 +4743,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
 
        QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
        atomic_set(&card->force_alloc_skb, 0);
-       qeth_get_channel_path_desc(card);
+       qeth_update_from_chp_desc(card);
 retry:
        if (retries)
                QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
index c5f03fa70fbaea0e774da0891a1ba9338c69f88d..4cd310cb5bdf8f1c070cc1193294928015834a94 100644 (file)
@@ -794,6 +794,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
                rc = -EEXIST;
        spin_unlock_irqrestore(&card->ip_lock, flags);
        if (rc) {
+               kfree(ipaddr);
                return rc;
        }
        if (!qeth_l3_add_ip(card, ipaddr))
@@ -858,6 +859,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
                rc = -EEXIST;
        spin_unlock_irqrestore(&card->ip_lock, flags);
        if (rc) {
+               kfree(ipaddr);
                return rc;
        }
        if (!qeth_l3_add_ip(card, ipaddr))
index 8818dd681c194a445ea937c6f1e4b0dd70efff5f..65123a21b97ec17ffb695a3e09dd06556bc5b0a8 100644 (file)
 struct sock *scsi_nl_sock = NULL;
 EXPORT_SYMBOL_GPL(scsi_nl_sock);
 
-static DEFINE_SPINLOCK(scsi_nl_lock);
-static struct list_head scsi_nl_drivers;
-
-static u32     scsi_nl_state;
-#define STATE_EHANDLER_BSY             0x00000001
-
-struct scsi_nl_transport {
-       int (*msg_handler)(struct sk_buff *);
-       void (*event_handler)(struct notifier_block *, unsigned long, void *);
-       unsigned int refcnt;
-       int flags;
-};
-
-/* flags values (bit flags) */
-#define HANDLER_DELETING               0x1
-
-static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
-       { {NULL, }, };
-
-
-struct scsi_nl_drvr {
-       struct list_head next;
-       int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
-                                u32 len, u32 pid);
-       void (*devt_handler)(struct notifier_block *nb,
-                                unsigned long event, void *notify_ptr);
-       struct scsi_host_template *hostt;
-       u64 vendor_id;
-       unsigned int refcnt;
-       int flags;
-};
-
-
-
 /**
  * scsi_nl_rcv_msg - Receive message handler.
  * @skb:               socket receive buffer
@@ -81,7 +47,6 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh;
        struct scsi_nl_hdr *hdr;
-       unsigned long flags;
        u32 rlen;
        int err, tport;
 
@@ -126,22 +91,24 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
                /*
                 * Deliver message to the appropriate transport
                 */
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-
                tport = hdr->transport;
-               if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
-                   !(transports[tport].flags & HANDLER_DELETING) &&
-                   (transports[tport].msg_handler)) {
-                       transports[tport].refcnt++;
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       err = transports[tport].msg_handler(skb);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-                       transports[tport].refcnt--;
-               } else
+               if (tport == SCSI_NL_TRANSPORT) {
+                       switch (hdr->msgtype) {
+                       case SCSI_NL_SHOST_VENDOR:
+                               /* Locate the driver that corresponds to the message */
+                               err = -ESRCH;
+                               break;
+                       default:
+                               err = -EBADR;
+                               break;
+                       }
+                       if (err)
+                               printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
+                                      __func__, hdr->msgtype, err);
+               }
+               else
                        err = -ENOENT;
 
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
 next_msg:
                if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
                        netlink_ack(skb, nlh, err);
@@ -150,333 +117,6 @@ next_msg:
        }
 }
 
-
-/**
- * scsi_nl_rcv_event - Event handler for a netlink socket.
- * @this:              event notifier block
- * @event:             event type
- * @ptr:               event payload
- *
- **/
-static int
-scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
-       struct netlink_notify *n = ptr;
-       struct scsi_nl_drvr *driver;
-       unsigned long flags;
-       int tport;
-
-       if (n->protocol != NETLINK_SCSITRANSPORT)
-               return NOTIFY_DONE;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       scsi_nl_state |= STATE_EHANDLER_BSY;
-
-       /*
-        * Pass event on to any transports that may be listening
-        */
-       for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
-               if (!(transports[tport].flags & HANDLER_DELETING) &&
-                   (transports[tport].event_handler)) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       transports[tport].event_handler(this, event, ptr);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-               }
-       }
-
-       /*
-        * Pass event on to any drivers that may be listening
-        */
-       list_for_each_entry(driver, &scsi_nl_drivers, next) {
-               if (!(driver->flags & HANDLER_DELETING) &&
-                   (driver->devt_handler)) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       driver->devt_handler(this, event, ptr);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-               }
-       }
-
-       scsi_nl_state &= ~STATE_EHANDLER_BSY;
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block scsi_netlink_notifier = {
-       .notifier_call  = scsi_nl_rcv_event,
-};
-
-
-/*
- * GENERIC SCSI transport receive and event handlers
- */
-
-/**
- * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
- * @skb:               socket receive buffer
- **/
-static int
-scsi_generic_msg_handler(struct sk_buff *skb)
-{
-       struct nlmsghdr *nlh = nlmsg_hdr(skb);
-       struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
-       struct scsi_nl_drvr *driver;
-       struct Scsi_Host *shost;
-       unsigned long flags;
-       int err = 0, match, pid;
-
-       pid = NETLINK_CREDS(skb)->pid;
-
-       switch (snlh->msgtype) {
-       case SCSI_NL_SHOST_VENDOR:
-               {
-               struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
-
-               /* Locate the driver that corresponds to the message */
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-               match = 0;
-               list_for_each_entry(driver, &scsi_nl_drivers, next) {
-                       if (driver->vendor_id == msg->vendor_id) {
-                               match = 1;
-                               break;
-                       }
-               }
-
-               if ((!match) || (!driver->dmsg_handler)) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       err = -ESRCH;
-                       goto rcv_exit;
-               }
-
-               if (driver->flags & HANDLER_DELETING) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       err = -ESHUTDOWN;
-                       goto rcv_exit;
-               }
-
-               driver->refcnt++;
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-
-               /* if successful, scsi_host_lookup takes a shost reference */
-               shost = scsi_host_lookup(msg->host_no);
-               if (!shost) {
-                       err = -ENODEV;
-                       goto driver_exit;
-               }
-
-               /* is this host owned by the vendor ? */
-               if (shost->hostt != driver->hostt) {
-                       err = -EINVAL;
-                       goto vendormsg_put;
-               }
-
-               /* pass message on to the driver */
-               err = driver->dmsg_handler(shost, (void *)&msg[1],
-                                        msg->vmsg_datalen, pid);
-
-vendormsg_put:
-               /* release reference by scsi_host_lookup */
-               scsi_host_put(shost);
-
-driver_exit:
-               /* release our own reference on the registration object */
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-               driver->refcnt--;
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               break;
-               }
-
-       default:
-               err = -EBADR;
-               break;
-       }
-
-rcv_exit:
-       if (err)
-               printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
-                        __func__, snlh->msgtype, err);
-       return err;
-}
-
-
-/**
- * scsi_nl_add_transport -
- *    Registers message and event handlers for a transport. Enables
- *    receipt of netlink messages and events to a transport.
- *
- * @tport:             transport registering handlers
- * @msg_handler:       receive message handler callback
- * @event_handler:     receive event handler callback
- **/
-int
-scsi_nl_add_transport(u8 tport,
-       int (*msg_handler)(struct sk_buff *),
-       void (*event_handler)(struct notifier_block *, unsigned long, void *))
-{
-       unsigned long flags;
-       int err = 0;
-
-       if (tport >= SCSI_NL_MAX_TRANSPORTS)
-               return -EINVAL;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-
-       if (transports[tport].msg_handler || transports[tport].event_handler) {
-               err = -EALREADY;
-               goto register_out;
-       }
-
-       transports[tport].msg_handler = msg_handler;
-       transports[tport].event_handler = event_handler;
-       transports[tport].flags = 0;
-       transports[tport].refcnt = 0;
-
-register_out:
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
-
-
-/**
- * scsi_nl_remove_transport -
- *    Disable transport receiption of messages and events
- *
- * @tport:             transport deregistering handlers
- *
- **/
-void
-scsi_nl_remove_transport(u8 tport)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-
-       if (tport < SCSI_NL_MAX_TRANSPORTS) {
-               transports[tport].flags |= HANDLER_DELETING;
-
-               while (transports[tport].refcnt != 0) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       schedule_timeout_uninterruptible(HZ/4);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-               }
-               transports[tport].msg_handler = NULL;
-               transports[tport].event_handler = NULL;
-               transports[tport].flags = 0;
-       }
-
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
-
-
-/**
- * scsi_nl_add_driver -
- *    A driver is registering its interfaces for SCSI netlink messages
- *
- * @vendor_id:          A unique identification value for the driver.
- * @hostt:             address of the driver's host template. Used
- *                     to verify an shost is bound to the driver
- * @nlmsg_handler:     receive message handler callback
- * @nlevt_handler:     receive event handler callback
- *
- * Returns:
- *   0 on Success
- *   error result otherwise
- **/
-int
-scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
-       int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
-                                u32 len, u32 pid),
-       void (*nlevt_handler)(struct notifier_block *nb,
-                                unsigned long event, void *notify_ptr))
-{
-       struct scsi_nl_drvr *driver;
-       unsigned long flags;
-
-       driver = kzalloc(sizeof(*driver), GFP_KERNEL);
-       if (unlikely(!driver)) {
-               printk(KERN_ERR "%s: allocation failure\n", __func__);
-               return -ENOMEM;
-       }
-
-       driver->dmsg_handler = nlmsg_handler;
-       driver->devt_handler = nlevt_handler;
-       driver->hostt = hostt;
-       driver->vendor_id = vendor_id;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-       list_add_tail(&driver->next, &scsi_nl_drivers);
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
-
-
-/**
- * scsi_nl_remove_driver -
- *    An driver is unregistering with the SCSI netlink messages
- *
- * @vendor_id:          The unique identification value for the driver.
- **/
-void
-scsi_nl_remove_driver(u64 vendor_id)
-{
-       struct scsi_nl_drvr *driver;
-       unsigned long flags;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-
-       list_for_each_entry(driver, &scsi_nl_drivers, next) {
-               if (driver->vendor_id == vendor_id) {
-                       driver->flags |= HANDLER_DELETING;
-                       while (driver->refcnt != 0) {
-                               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                               schedule_timeout_uninterruptible(HZ/4);
-                               spin_lock_irqsave(&scsi_nl_lock, flags);
-                       }
-                       list_del(&driver->next);
-                       kfree(driver);
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       return;
-               }
-       }
-
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
-              __func__, (unsigned long long)vendor_id);
-       return;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
-
-
 /**
  * scsi_netlink_init - Called by SCSI subsystem to initialize
  *     the SCSI transport netlink interface
@@ -485,36 +125,19 @@ EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
 void
 scsi_netlink_init(void)
 {
-       int error;
        struct netlink_kernel_cfg cfg = {
                .input  = scsi_nl_rcv_msg,
                .groups = SCSI_NL_GRP_CNT,
        };
 
-       INIT_LIST_HEAD(&scsi_nl_drivers);
-
-       error = netlink_register_notifier(&scsi_netlink_notifier);
-       if (error) {
-               printk(KERN_ERR "%s: register of event handler failed - %d\n",
-                               __func__, error);
-               return;
-       }
-
        scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
-                                            THIS_MODULE, &cfg);
+                                            &cfg);
        if (!scsi_nl_sock) {
                printk(KERN_ERR "%s: register of receive handler failed\n",
                                __func__);
-               netlink_unregister_notifier(&scsi_netlink_notifier);
                return;
        }
 
-       /* Register the entry points for the generic SCSI transport */
-       error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
-                               scsi_generic_msg_handler, NULL);
-       if (error)
-               printk(KERN_ERR "%s: register of GENERIC transport handler"
-                               "  failed - %d\n", __func__, error);
        return;
 }
 
@@ -526,158 +149,10 @@ scsi_netlink_init(void)
 void
 scsi_netlink_exit(void)
 {
-       scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
-
        if (scsi_nl_sock) {
                netlink_kernel_release(scsi_nl_sock);
-               netlink_unregister_notifier(&scsi_netlink_notifier);
        }
 
        return;
 }
 
-
-/*
- * Exported Interfaces
- */
-
-/**
- * scsi_nl_send_transport_msg -
- *    Generic function to send a single message from a SCSI transport to
- *    a single process
- *
- * @pid:               receiving pid
- * @hdr:               message payload
- *
- **/
-void
-scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
-{
-       struct sk_buff *skb;
-       struct nlmsghdr *nlh;
-       const char *fn;
-       char *datab;
-       u32 len, skblen;
-       int err;
-
-       if (!scsi_nl_sock) {
-               err = -ENOENT;
-               fn = "netlink socket";
-               goto msg_fail;
-       }
-
-       len = NLMSG_SPACE(hdr->msglen);
-       skblen = NLMSG_SPACE(len);
-
-       skb = alloc_skb(skblen, GFP_KERNEL);
-       if (!skb) {
-               err = -ENOBUFS;
-               fn = "alloc_skb";
-               goto msg_fail;
-       }
-
-       nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
-       if (!nlh) {
-               err = -ENOBUFS;
-               fn = "nlmsg_put";
-               goto msg_fail_skb;
-       }
-       datab = NLMSG_DATA(nlh);
-       memcpy(datab, hdr, hdr->msglen);
-
-       err = nlmsg_unicast(scsi_nl_sock, skb, pid);
-       if (err < 0) {
-               fn = "nlmsg_unicast";
-               /* nlmsg_unicast already kfree_skb'd */
-               goto msg_fail;
-       }
-
-       return;
-
-msg_fail_skb:
-       kfree_skb(skb);
-msg_fail:
-       printk(KERN_WARNING
-               "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
-               "msglen %d: %s : err %d\n",
-               __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
-               fn, err);
-       return;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
-
-
-/**
- * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
- *                      to a specific process id.
- *
- * @pid:               process id of the receiver
- * @host_no:           host # sending the message
- * @vendor_id:         unique identifier for the driver's vendor
- * @data_len:          amount, in bytes, of vendor unique payload data
- * @data_buf:          pointer to vendor unique data buffer
- *
- * Returns:
- *   0 on successful return
- *   otherwise, failing error code
- *
- * Notes:
- *     This routine assumes no locks are held on entry.
- */
-int
-scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
-                        char *data_buf, u32 data_len)
-{
-       struct sk_buff *skb;
-       struct nlmsghdr *nlh;
-       struct scsi_nl_host_vendor_msg *msg;
-       u32 len, skblen;
-       int err;
-
-       if (!scsi_nl_sock) {
-               err = -ENOENT;
-               goto send_vendor_fail;
-       }
-
-       len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
-       skblen = NLMSG_SPACE(len);
-
-       skb = alloc_skb(skblen, GFP_KERNEL);
-       if (!skb) {
-               err = -ENOBUFS;
-               goto send_vendor_fail;
-       }
-
-       nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
-                               skblen - sizeof(*nlh), 0);
-       if (!nlh) {
-               err = -ENOBUFS;
-               goto send_vendor_fail_skb;
-       }
-       msg = NLMSG_DATA(nlh);
-
-       INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
-                               SCSI_NL_SHOST_VENDOR, len);
-       msg->vendor_id = vendor_id;
-       msg->host_no = host_no;
-       msg->vmsg_datalen = data_len;   /* bytes */
-       memcpy(&msg[1], data_buf, data_len);
-
-       err = nlmsg_unicast(scsi_nl_sock, skb, pid);
-       if (err)
-               /* nlmsg_multicast already kfree_skb'd */
-               goto send_vendor_fail;
-
-       return 0;
-
-send_vendor_fail_skb:
-       kfree_skb(skb);
-send_vendor_fail:
-       printk(KERN_WARNING
-               "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
-               __func__, host_no, err);
-       return err;
-}
-EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
-
-
index fa1dfaa83e32986061586c4fcb2f6f8e9e23eaf9..31969f2e13ceff07e2304dd0cca84b27c655fa22 100644 (file)
@@ -2119,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_CREATE_SESSION:
                err = iscsi_if_create_session(priv, ep, ev,
-                                             NETLINK_CB(skb).pid,
+                                             NETLINK_CB(skb).portid,
                                              ev->u.c_session.initial_cmdsn,
                                              ev->u.c_session.cmds_max,
                                              ev->u.c_session.queue_depth);
@@ -2132,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                }
 
                err = iscsi_if_create_session(priv, ep, ev,
-                                       NETLINK_CB(skb).pid,
+                                       NETLINK_CB(skb).portid,
                                        ev->u.c_bound_session.initial_cmdsn,
                                        ev->u.c_bound_session.cmds_max,
                                        ev->u.c_bound_session.queue_depth);
@@ -2969,8 +2969,7 @@ static __init int iscsi_transport_init(void)
        if (err)
                goto unregister_conn_class;
 
-       nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
-                                   THIS_MODULE, &cfg);
+       nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
        if (!nls) {
                err = -ENOBUFS;
                goto unregister_session_class;
index 7e2ddc042f5bff19954bc4d61200a36bf6523609..c6250867a95d4cb0de8ec92e498b995aaf84d99f 100644 (file)
@@ -190,16 +190,30 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
 {
        struct ssb_bus *bus = mcore->dev->bus;
 
-       mcore->flash_buswidth = 2;
-       if (bus->chipco.dev) {
-               mcore->flash_window = 0x1c000000;
-               mcore->flash_window_size = 0x02000000;
+       /* When there is no chipcommon on the bus there is 4MB flash */
+       if (!bus->chipco.dev) {
+               mcore->flash_buswidth = 2;
+               mcore->flash_window = SSB_FLASH1;
+               mcore->flash_window_size = SSB_FLASH1_SZ;
+               return;
+       }
+
+       /* There is ChipCommon, so use it to read info about flash */
+       switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
+       case SSB_CHIPCO_FLASHT_STSER:
+       case SSB_CHIPCO_FLASHT_ATSER:
+               pr_err("Serial flash not supported\n");
+               break;
+       case SSB_CHIPCO_FLASHT_PARA:
+               pr_debug("Found parallel flash\n");
+               mcore->flash_window = SSB_FLASH2;
+               mcore->flash_window_size = SSB_FLASH2_SZ;
                if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
                               & SSB_CHIPCO_CFG_DS16) == 0)
                        mcore->flash_buswidth = 1;
-       } else {
-               mcore->flash_window = 0x1fc00000;
-               mcore->flash_window_size = 0x00400000;
+               else
+                       mcore->flash_buswidth = 2;
+               break;
        }
 }
 
index 3abb31df8f28289a2cdad7a5c41c1ab3063123e2..20d0aec52e72fded49622158d11926a7edc6af13 100644 (file)
@@ -95,7 +95,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
        init_MUTEX(&netlink_mutex);
 #endif
 
-       sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
+       sock = netlink_kernel_create(&init_net, unit, &cfg);
 
        if (sock)
                rcv_cb = cb;
@@ -135,7 +135,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
        }
        memcpy(nlmsg_data(nlh), msg, len);
 
-       NETLINK_CB(skb).pid = 0;
+       NETLINK_CB(skb).portid = 0;
        NETLINK_CB(skb).dst_group = 0;
 
        ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
index 0ca857ac473e91e3171c0963f85a4be214b64947..48aa1361903e3b5c4f7ae49e38112e30f0e05dac 100644 (file)
@@ -119,7 +119,9 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
        *total_flags = new_flags;
 }
 
-static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct wbsoft_priv *priv = dev->priv;
 
index fdc9ff045ef8c3073519115b0e383bd65e97c333..eeb14030d8a24e67f113d65f1da1578d04b3de89 100644 (file)
@@ -42,7 +42,6 @@ fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
 fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
 fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
                                   cxgb3/t3c_psram-1.1.0.bin \
-                                  cxgb3/t3fw-7.10.0.bin \
                                   cxgb3/ael2005_opt_edc.bin \
                                   cxgb3/ael2005_twx_edc.bin \
                                   cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
deleted file mode 100644 (file)
index 96399d8..0000000
+++ /dev/null
@@ -1,1935 +0,0 @@
-:1000000060007400200380002003700000001000D6
-:1000100000002000E100028400070000E1000288E7
-:1000200000010000E0000000E00000A0010000006E
-:1000300044444440E3000183200200002001E0002A
-:100040002001FF101FFFD0001FFFC000E300043C91
-:100050000200000020006C841FFFC2A020006CCCB6
-:100060001FFFC2A420006D0C1FFFC2A820006D80DE
-:100070001FFFC2AC200003C0C00000E43100EA3121
-:1000800000A13100A03103020002ED306E2A05000C
-:10009000ED3100020002160012FFDBC03014FFDA5F
-:1000A000D30FD30FD30F03431F244C107249F0D347
-:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
-:1000C000D30FD30F03431F244C107249F0D30FD327
-:1000D0000FD30F14FFCE03421F14FFCB03421F1296
-:1000E000FFCCC0302D37302D37342D37382D373CED
-:1000F000233D017233ED00020012FFC4C0302F37E0
-:10010000002F37102F37202F3730233D017233ED6A
-:1001100000020012FFBEC0302737002737102737F4
-:1001200020273730233D017233ED03020012FFB95F
-:1001300013FFBA0C0200932012FFB913FFB90C028F
-:1001400000932012FFB8C0319320822012FFB71312
-:10015000FFB7932012FFB715FFB316FFB6C030D715
-:100160002005660160001B00000000000000000088
-:10017000043605000200D30FD30F05330C6E3B1479
-:100180000747140704437631E604360505330C6F40
-:100190003BED00020012FFA615FFA3230A00D720A3
-:1001A000070443043E0505330C0747146F3BF00377
-:1001B000020012FFA1C03014FFA1D30FD30FD30F41
-:1001C0009340B4447249F2D30FD30FD30F14FF9B63
-:1001D000834014FF9B834012FF9B230A0014FF9A65
-:1001E000D30FD30FD30F9340B4447249F2D30FD33C
-:1001F0000FD30F14FF95834012FF95C92F832084DE
-:10020000218522BC22743B0F8650B4559630B433FE
-:100210007433F463FFE60000653FE1655FDE12FFC3
-:100220007C230A0028374028374428374828374C91
-:10023000233D017233ED03020000020012FF7AC079
-:1002400032032E0503020012FF7813FF819320C0B2
-:1002500011014931004831010200C00014FF7E0441
-:10026000D23115FF7D945014FF7D04D33115FF7CEE
-:10027000945014FF7C04D43115FF7C24560014FFE5
-:100280007B04D53115FF7B24560010FF7A03000054
-:10029000000000000000000000000000000000005E
-:1002A000000000000000000000000000000000004E
-:1002B000000000000000000000000000000000003E
-:1002C000000000000000000000000000000000002E
-:1002D000000000000000000000000000000000001E
-:1002E000000000000000000000000000000000000E
-:1002F00000000000000000000000000000000000FE
-:1003000000000000000000000000000000000000ED
-:1003100000000000000000000000000000000000DD
-:1003200000000000000000000000000000000000CD
-:1003300000000000000000000000000000000000BD
-:1003400000000000000000000000000000000000AD
-:10035000000000000000000000000000000000009D
-:10036000000000000000000000000000000000008D
-:10037000000000000000000000000000000000007D
-:10038000000000000000000000000000000000006D
-:10039000000000000000000000000000000000005D
-:1003A000000000000000000000000000000000004D
-:1003B000000000000000000000000000000000003D
-:1003C000000000000000000000000000000000002D
-:1003D000000000000000000000000000000000001D
-:1003E000000000000000000000000000000000000D
-:1003F00000000000000000000000000000000000FD
-:1004000000000000000000000000000000000000EC
-:1004100000000000000000000000000000000000DC
-:1004200063FFFC000000000000000000000000006E
-:100430000000000000000000000000001FFC0000A1
-:100440001FFC0000E30005C81FFC00001FFC0000AB
-:10045000E30005C81FFC00001FFC0000E30005C806
-:100460001FFFC0001FFFC000E30005C81FFFC00042
-:100470001FFFC018E30005C81FFFC0181FFFC018EA
-:10048000E30005E01FFFC0181FFFC294E30005E072
-:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
-:1004A0001FFFC59CE300085C200000002000016ADB
-:1004B000E3000B582000018020000180E3000CC401
-:1004C0002000020020000203E3000CC42000021CF4
-:1004D00020000220E3000CC8200002202000022699
-:1004E000E3000CCC2000023C20000240E3000CD4CE
-:1004F0002000024020000249E3000CD82000024CFA
-:1005000020000250E3000CE42000025020000259B9
-:10051000E3000CE82000025C20000260E3000CF421
-:100520002000026020000269E3000CF82000026C49
-:1005300020000270E3000D04200002702000027908
-:10054000E3000D082000028C2000028CE3000D1453
-:100550002000029020000293E3000D14200002AC62
-:10056000200002B0E3000D18200002D0200002F2AB
-:10057000E3000D1C200003B0200003B0E3000D4099
-:10058000200003B0200003B0E3000D40200003B0C2
-:10059000200003B0E3000D40200003B0200003B0B2
-:1005A000E3000D40200003B020006EA4E3000D40E6
-:1005B00020006EA420006EA4E30078340000000048
-:1005C00000000000000000001FFC00001FFC0000F5
-:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
-:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
-:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
-:10060000003FFFFF8040000010000000080FFFFFC8
-:100610001FFFC27D000FFFFF804FFFFF8000000023
-:1006200000000880B000000560500000600000007D
-:1006300040000011350000004100000010000001E2
-:100640002000000000001000400000000500000035
-:10065000800000190400000000000800E100020012
-:1006600010000005806000007000000020000009FC
-:10067000001FF8008000001EA0000000F80000002D
-:1006800007FFFFFF080000001800000001008001C4
-:10069000420000001FFFC22D1FFFC0EC00010080C0
-:1006A000604000001A0000000C0000001000000A6A
-:1006B000000030000001000080000018FC00000075
-:1006C0008000000100004000600008008000001C65
-:1006D0008000001A030000008000040004030403EB
-:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
-:1006F000FFFFF000000016D00000FFF7A50000008B
-:100700001FFFC4C01FFFC4710001000800000B20C0
-:10071000202FFF801FFFC46500002C00FFFEFFF8A4
-:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
-:100730000000FFEF010011001FFFC3E21FFFC5A073
-:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
-:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
-:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
-:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
-:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
-:10079000000100817FFFFFFFE1000600000027103D
-:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
-:1007B0000003D0901FFFC5742B5063802B507980AD
-:1007C0002B5090802B50A6801FFFC4790100110F81
-:1007D000202FFE0020300080202FFF000000FFFFB0
-:1007E0000001FFF82B50B2002B50B208000100109E
-:1007F0002B50B1802B50B2802B50BA000001001159
-:100800002B50BD282B50BC802B50BDA020300000A9
-:10081000DFFFFE005000000200C0000002000000E8
-:10082000FFFFF7F41FFFC07C000FF800044000003A
-:10083000001000000C4000001C400000E00000A080
-:100840001FFFC5501FFD00081FFFC5641FFFC578AF
-:100850001FFFC58CE1000690E10006EC00000000DF
-:100860000000000000000000000000000100000087
-:100870000000000000000000000000002010004008
-:10088000201000402010004020140080200C0000A8
-:10089000200C0000200C00002010004020140080DC
-:1008A0002014008020140080201800C0201C0100AB
-:1008B000201C0100201C010020200140201800C045
-:1008C000201800C0201800C0201C0100201800C003
-:1008D000201800C0201800C0201C0100202001406A
-:1008E00020200140202001402020094020200940F4
-:1008F000202009402020094020240980FFFFFFFF1D
-:10090000FFFFFFFFFFFFFFFF0000000000000000EF
-:1009100000000000000000000000000020005588DA
-:1009200020005458200055882000558820005394FA
-:100930002000539420005394200051D4200051D41F
-:10094000200051CC2000513820004FE020004DC045
-:1009500020004B94000000000000000020005558CB
-:1009600020005424200054C8200054C82000527C89
-:100970002000527C2000527C2000527C2000527CBF
-:10098000200051C42000527C20004F0020004D70F8
-:1009900020004B40000000000000000020000BF091
-:1009A00020003ADC200004C02000473020000BE883
-:1009B000200041F4200003F0200046F020004B1CF2
-:1009C00020003F0020003E1C20003A58200038E85C
-:1009D00020003658200031B820003C7820002DD06F
-:1009E0002000286420006828200023F0200020D068
-:1009F0002000207C20001D68200018602000158841
-:100A000020000E5420000C3420001134200013204C
-:100A1000200043EC20003EB420000BF8200004C06E
-:100A200000000000000000000000000000000000C6
-:100A300000000000000000000000000000000000B6
-:100A400000000000000000000000000000000000A6
-:100A50000000000000000000000000000000000096
-:100A60000000000000000000000000000000000086
-:100A70000000000000000000000000000000000076
-:100A80000000000000000000000000000000000066
-:100A90000000000000000000000000000000000056
-:100AA0003264000000000000326400006400640052
-:100AB00064006400640064006400640000000000DE
-:100AC0000000000000000000000000000000000026
-:100AD0000000000000000000000000000000000016
-:100AE0000000000000000000000000000000000006
-:100AF00000000000000000000000000000000000F6
-:100B000000000000000010000000000000000000D5
-:100B100000000000000000000000000000001000C5
-:100B200000000000000000000000000000000000C5
-:100B300000432380000000000000000000000000CF
-:100B400000000000000000000000000000000000A5
-:100B50000000000000000000005C94015D94025E53
-:100B600094035F94004300000000000000000000B8
-:100B70000000000000000000000000000000000075
-:100B80000000000000000000000000000000000065
-:100B90000000000000000000005C90015D90025E1B
-:100BA00090035F9000530000000000000000000070
-:100BB0000000000000000000000000000000000035
-:100BC0000000000000000000000000000000000025
-:100BD0000000000000000000009C94001D90019D9A
-:100BE00094029E94039F94040894050994060A9421
-:100BF000070B94004300000000000000000000000C
-:100C000000000000000000000000000000000000E4
-:100C10000000000000000000009C90019D90029EDA
-:100C200090071D90039F90047890057990067A9024
-:100C3000077B90005300000000000000000000004F
-:100C400000000000000000000000000000000000A4
-:100C5000000000000000000000DC94001D9001DD99
-:100C60009402DE9403DF940404940505940606942C
-:100C70000707940808940909940A0A940B0B940036
-:100C80004300000000000000000000000000000021
-:100C9000000000000000000000DC9001DD9002DE9A
-:100CA000900B1D9003DF9004B49005B59006B690AC
-:100CB00007B79008B89009B9900ABA900BBB90009A
-:100CC0005300000063FFFC0020006C6010FFFF0A6F
-:100CD0000000000020006C8400D23110FFFE0A00EA
-:100CE0000000000020006CCC00D33110FFFE0A0091
-:100CF0000000000020006D0C00D43110FFFE0A003F
-:100D00000000000020006D8000D53110FFFE0A00B9
-:100D10000000000063FFFC00E00000A012FFF7826B
-:100D200020028257C82163FFFC12FFF303E830045E
-:100D3000EE3005C03093209421952263FFFC000023
-:100D40001FFFD000000400201FFFC5A01FFFC6909A
-:100D5000200A0011FFFB13FFFB03E631010200161E
-:100D6000FFFA17FFFAD30F776B069060B4667763CC
-:100D7000F85415F3541AA50F140063FFF90000008E
-:100D80006C1004C020D10F006C1004C0C71AEF060D
-:100D9000D830BC2BD72085720D4211837105450BCD
-:100DA000957202330C2376017B3B04233D0893713B
-:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
-:100DC000088202280A01038E380E0E42C8EE29A6B8
-:100DD0007E6D4A0500208800308C8271D10FC0F0F2
-:100DE000028F387FC0EA63FFE400C0F1C050037E89
-:100DF0000CA2EE0E3D1208820203F538050542CB27
-:100E00005729A67E2FDC100F4F366DFA050020887B
-:100E100000308CBC75C03008E208280A0105833810
-:100E2000030342C93E29A67E0D480CD30F6D8A05E7
-:100E300000208800B08C8271D10FC05008F5387541
-:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
-:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
-:100E6000221DC0D07B81352920060BB702299CFAB0
-:100E7000655008282072288CFF2824726491642A07
-:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
-:100E90003FC1CE7CA13669AC336000370029200603
-:100EA000D7D0299CFACC57282072288CFF2824728E
-:100EB0006491392AD0000CA80C6481680EA90C64D6
-:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
-:100ED0000F2D25028A32C0900A6E5065E5B529248F
-:100EE00067090F4765F5B12C200C1FEEB30CCE112E
-:100EF000AFEE29E286B44879830260058219EEAF2D
-:100F000009C90A2992A36890078F2009FF0C65F58B
-:100F10006E2FE28564F56865559628221D7B810554
-:100F2000D9B060000200C0908B9417EEA50B881416
-:100F300087740B0B47A87718EEA309BB100877023C
-:100F400097F018EEA117EEA208A8010B8802074738
-:100F5000021BEE9E97F10B880298F22790232B90AC
-:100F60002204781006BB1007471208BB0228902104
-:100F70000777100C88100788020B880217EE968BF3
-:100F80003307BB0187340B880298F3979997F48B4A
-:100F90009587399BF588968B3898F688979BF897B4
-:100FA000F998F717EE8D28E28507C7082D74CF084A
-:100FB000480B28E68565550F2B221E28221D7B89AC
-:100FC000022B0A0064BF052CB00728B000DA200607
-:100FD000880A28824CC0D10B8000DBA065AFE76394
-:100FE000FEEA0000292072659E946004E72A2072C0
-:100FF00065AEBF6004DE00002EB0032C2067D4E095
-:1010000065C1058A328C330AFF500C4554BC5564C7
-:10101000F4EB19EE72882A09A90109880C64821F71
-:10102000C0926000DD2ED0032A2067D4E065A0D8EE
-:101030008A328B330AFC500B4554BC5564C4BE192C
-:10104000EE67882A09A9017989D50BEA5064A4E3DF
-:101050000CEE11C0F02F16132E16168AE78CE82A14
-:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
-:101070006583468837DBC0AE89991E789B022BCCEE
-:10108000012B161B29120E2B0A0029161A7FC307E3
-:101090007FC9027EAB01C0B165B49D8B352F0A00BC
-:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
-:1010B00012162B1619005104C0C100CC1A2CCCFFFB
-:1010C0002C16170CFC132C16182B121A2A121BDCC8
-:1010D000505819B6C0D0C0902E5CF42C12172812AC
-:1010E000182F121B2A121A08FF010CAA01883407B4
-:1010F0004C0AAB8B2812192BC6162F86082A860994
-:101100002E74102924672E70038975B1EA2A74039E
-:10111000B09909490C659DB42B20672D250265B354
-:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
-:10113000B00728B000DA2006880A28824CC0D10BFC
-:101140008000DBA065AFE763FD8289BAB199659045
-:101150009788341CEE2398BA8F331EEE1C0F4F5421
-:101160002FB42C8D2A8A320EDD020CAC017DC966AB
-:101170000A49516F92608A3375A65B2CB0130AED51
-:10118000510DCD010D0D410C0C417DC9492EB01200
-:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
-:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
-:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
-:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
-:1011D000EB01C0B164B161C091292467C020D10F77
-:1011E00000008ADAB1AA64A0C02C20672D25026510
-:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
-:1012000065D28A0A4E516FE202600281C0902924A1
-:1012100067090F4765F2F828221D7B89022B0A0017
-:1012200064BCA92CB00728B000DA2006880A2882FE
-:101230004CC0D10B8000DBA065AFE763FC8E0000E3
-:101240000CE9506492ED0CEF11C080281611AFBF6D
-:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
-:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
-:1012700089991C789B022CEC012C161B29120C2C32
-:101280000A0029161A7AE3077AE9027FBB01C0C176
-:1012900065C2A58B352C0A002A0A007AE30564E1B1
-:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
-:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
-:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
-:1012D000920263FF018A330AAB5064BEF92CD0132B
-:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
-:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
-:101300008A362FD2097CA3077AC9027EFB01C0B1BD
-:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
-:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
-:10133000027DEB01C0C165CE9DC090292467C0200D
-:10134000D10F88378C3698140CE90C29161408F83C
-:101350000C981D78FB07281214B088281614891DD4
-:101360009F159B16C0F02B121429161A2B161B8BD7
-:10137000147AE30B7AE90688158E1678EB01C0F132
-:1013800065F1BA29121A2F12118A352E121B9A1AD8
-:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
-:1013A000881AC0F098107AE30A7EA9052A12017AF9
-:1013B0008B01C0F164F08160018389368B37991706
-:1013C0000BE80C981F09C90C29161578EB07281291
-:1013D00015B088281615D9C09A199E188A1F2E1282
-:1013E000152A161A2E161BDAC0C0E08C177F930B35
-:1013F0007FA90688188F1978FB01C0E165E13E29B5
-:10140000121A2F12138A352E121B9A1BAFEE2F12AF
-:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
-:1014200098127AE30A7EA9052A12037A8B01C0F189
-:1014300065F10A2E12162E16192A121B005104C02D
-:10144000E100EE1AB0EE2E16170EFF132F16180F2E
-:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
-:10146000AA2A161B2C161A63FC5E00007FB30263C7
-:10147000FE3163FE2B7EB30263FC3063FC2A000066
-:101480006450C0DA20DBC058168AC020D10FC0914A
-:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
-:1014A0000A80C09A2924682C7007581575D2A0D1DB
-:1014B0000F03470B18ED4DDB70A8287873022B7DC6
-:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
-:1014D00063FAE4000029221D2D25027B9901C0B08A
-:1014E000C9B62CB00728B000DA2006880A28824C3A
-:1014F000C0D10B8000DBA065AFE7C020D10FC09149
-:1015000063FBFF00022A0258024C0AA202060000F6
-:10151000022A025802490AA202060000DB70DA2001
-:10152000C0D12E0A80C09E2924682C7007581554FB
-:10153000C020D10FC09463FBC9C09663FBC4C096A2
-:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
-:10155000C2A02AB4002C200C63FF27008D358CB765
-:101560007DCB0263FDD263FC6D8F358ED77FEB029E
-:1015700063FDC563FC6000006C1004C020D10F0047
-:101580006C1004C020D10F006C10042B221E2822E6
-:101590001DC0A0C0942924062A25027B8901DBA056
-:1015A000C9B913ED04DA2028B0002CB00703880A6B
-:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
-:1015C0000F0000006C10042C20062A210268C805B8
-:1015D00028CCF965812E0A094C6591048F30C1B879
-:1015E0000F8F147FB00528212365812716ECF3297E
-:1015F000629E6F98026000F819ECEF2992266890BD
-:10160000078A2009AA0C65A0E72A629D64A0E12B45
-:10161000200C0CB911A6992D92866FD9026000DBBF
-:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
-:101630000C65E0C7279285C0E06470BF1DECEC68C4
-:10164000434E1CECEB8A2B0CAA029A708920089955
-:10165000110D99029971882A98748F329F752821EB
-:1016600004088811987718ECDC0CBF11A6FF2DF246
-:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
-:1016800074DB40580E81D2A0D10FC020D10F0000D2
-:101690000029CCF96490B12C20668931B1CC0C0CB6
-:1016A000472C24666EC60260008509F85065807F6D
-:1016B0001CECD18A2B0F08400B881008AA020CAA38
-:1016C000029A7089200899110D99029971883398AE
-:1016D000738C329C728A2A9A748934997563FF7D5F
-:1016E00000CC57DA20DB30DC4058155FC020D10F2A
-:1016F00000DA20C0B65815EE63FFE500DA20581571
-:10170000EC63FFDC00DA20DB30DC40DD5058167A79
-:10171000D2A0D10FC858DA20DB305814C72A2102D2
-:1017200065AFBDC09409A90229250263FFB200007C
-:101730002B21045814731DECADC0E02E24668F30AD
-:101740002B200C0F8F1463FF66292138C088798302
-:101750001F8C310CFC5064CF562B2104C0C0581490
-:10176000681DECA2C0E08F302B200C0F8F1463FF9C
-:101770003E2C20662B2104B1CC0C0C472C2466583F
-:1017800014601DEC9AC0E02E24668F302B200C0FC5
-:101790008F1463FF1A0000006C1004C0B7C0A116BC
-:1017A000EC9615EC88D720D840B822C04005350209
-:1017B0009671957002A438040442C94B1AEC7B1947
-:1017C000EC7C29A67EC140D30F6D4A0500808800BD
-:1017D000208C220A88A272D10FC05008A53875B09B
-:1017E000E363FFD76C10069313941129200665520A
-:1017F00088C0716898052A9CF965A29816EC6F2933
-:1018000021028A1309094C6590CD8AA00A6A512ADF
-:10181000ACFD65A0C2CC5FDB30DA208C115815120C
-:10182000C0519A13C7BF9BA98E132EE20968E060CE
-:101830002F629E1DEC606FF8026000842DD2266836
-:10184000D0052F22007DF9782C629DC79064C0706E
-:101850009C108A132B200C2AA0200CBD11A6DD0A97
-:101860004F14BFA809880129D286AF88288C09792E
-:101870008B591FEC520FBF0A2FF2A368F0052822E4
-:10188000007F894729D285D4906590756000430018
-:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
-:1018A000BF0A6E96102FF2A368F00488207F890586
-:1018B00029D285659165DA2058157DC95C6001FFE4
-:1018C00000DA20C0B658157A60000C00C09063FFA3
-:1018D000B50000DA205815766551E48D138C11DBC4
-:1018E000D08DD0022A020D6D515813E39A1364A1D2
-:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
-:10190000FD00C091C0F12820062C2066288CF9A784
-:10191000CC0C0C472C24666FC6098D138DD170DE5C
-:1019200002290A00099D02648159C9D38A102B211A
-:10193000045813F38A13C0B02B24662EA2092AA0E0
-:10194000200E28141CEC298D1315EC1DC1700A778C
-:101950003685562DDC28AC2C9C12DED0A8557CD3C5
-:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
-:10197000D4A028200CB455C0D02B0A882F0A800C84
-:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
-:1019900012DEF0AC882D84CF28120229120378F3CE
-:1019A000022EFDF8289020D3E007880CC1700808AB
-:1019B00047289420087736657FAB891313EC10898C
-:1019C00090C0F47797491BEC0EC1CA2821048513F7
-:1019D000099E4006EE11875304881185520E880235
-:1019E0000C88029BA09FA18F2B9DA598A497A795DB
-:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
-:101A00001106CC082BC2852DE4CF2BBC202BC6851C
-:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
-:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
-:101A300088201EEBE38F138EE48FF40888110A8848
-:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
-:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
-:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
-:101A70002F24722C2502C020D10F871387700707EF
-:101A80004763FD6E282138C099798B0263FE9ADD89
-:101A9000F063FE9500DA20DB308C11DD505815968E
-:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
-:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
-:101AC000C020D10F6C1006292102C0D07597102AB2
-:101AD00032047FA70A8B357FBF052D25020DD90261
-:101AE000090C4C65C18216EBB41EEBB228629EC095
-:101AF000FA78F30260018829E2266890078A2009B3
-:101B0000AA0C65A17A2A629DDFA064A1772B200C24
-:101B10000CBC11A6CC29C286C08C79830260015707
-:101B200019EBA709B90A2992A368900788200988A8
-:101B30000C65814327C2851CEBA964713A89310980
-:101B40008B140CBB016FB11D2C20669F10B1CC0C07
-:101B50000C472C24666EC60260014009FF5065F1F7
-:101B60003A8A102AAC188934C0C47F973C18EBA974
-:101B70001BEBA88F359C719B708B209D7408BB025A
-:101B80009B72C08298751BEBA40F08409B730F8853
-:101B90001198777FF70B2F2102284A0008FF022FA8
-:101BA0002502C0B4600004000000C0B07E97048F1E
-:101BB000362F25227D970488372825217C9736C02B
-:101BC000F1C0900AF9382F3C200909426490861927
-:101BD000EB7618EB7728967E00F08800A08C00F05A
-:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
-:101BF0002AAC182A669D89307797388F338A321835
-:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
-:101C10008498E1882B9DE59AE69FE71AEB78099F67
-:101C20004006FF110FCC020A880298E2C1FC0FCCDB
-:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
-:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
-:101C5000CF5CC020D10FC081C0900F8938C0877978
-:101C6000880263FF7263FF6600CC57DA20DB30DC4A
-:101C7000405813FDC020D10FDA2058148D63FFE8BF
-:101C8000C0A063FE82DA20C0B658148963FFD90071
-:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
-:101CA000045813171EEB44C0D02D246663FEB10008
-:101CB0006C1006D62019EB3F1EEB4128610217EB92
-:101CC0003E08084C65805F8A300A6A5169A3572B29
-:101CD000729E6EB83F2A922668A0048C607AC9343E
-:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
-:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
-:101D00002C160068E0052F62007EF91522D285CFDF
-:101D10002560000D00DA60C0B6581465C85A60012D
-:101D20000F00DA60581462655106DC40DB308D30FC
-:101D3000DA600D6D515812D0D3A064A0F384A1C015
-:101D40005104044763FF6D00C0B02C60668931B157
-:101D5000CC0C0C472C64666FC60270960A2B61048B
-:101D60005812E7C0B02B64666550B42A3C10C0E737
-:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
-:101D8000EB0A18EB0B28967E8D106DDA0500A08803
-:101D900000C08CC0A089301DEB1A77975388328C15
-:101DA000108F3302CE0BC02492E12261049DE00427
-:101DB00022118D6B9BE59FE798E61FEB1009984079
-:101DC0000688110822020FDD02C18D9DE208220261
-:101DD00092E4B4C22E600C1FEB000CE811A7882C13
-:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
-:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
-:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
-:101E1000C0F00ADF387FE80263FF6C63FF600000F8
-:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
-:101E300063C020D10F0000006C10042920062A2264
-:101E40001EC0392C221D232468C0307AC107DDA0B2
-:101E5000600004000000C0D06E9738C08F2E0A804A
-:101E60002B2014C0962924060EBB022E21022B24FF
-:101E7000147E8004232502DE307AC10EC8ABDBD08D
-:101E8000DA202C0A00580B062E21020E0F4CC8FE39
-:101E90006000690068956528210208084C65805C2F
-:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
-:101EB0002668B0048C207BC95329A29D1FEAC16407
-:101EC000904A9390C0C31DEAD52B21049D9608BB70
-:101ED000110CBB029B979B911CEAD2C08523E4A204
-:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
-:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
-:101F00000F8EF912EAC82E2689C020D10FDA20C020
-:101F1000B65813E7C020D10F6C10062A2006941083
-:101F200068A80528ACF965825029210209094C6589
-:101F3000920ACC5FDB30DA208C1058134BC051D39F
-:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
-:101F50008F3A16EA99B1FB64B13128629E6F88020C
-:101F60006001ED294C332992266890078A2009AA3E
-:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
-:101F8000B7110677082972867983026001CD0CB9F2
-:101F90000A2992A36890082C220009CC0C65C1BBC9
-:101FA0002772856471B5282006288CF96481E52C98
-:101FB00020668931B1CC0C0C472C24666EC60260B9
-:101FC00001A109F85065819B2A21048CE488361E02
-:101FD000EA7D088914A9CC08084709881019EA92F3
-:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
-:101FF000CC029C722E302C293013283012049910F8
-:102000000688100CEE109F740EAE0209880208EECE
-:10201000029E738C3704AA119C758938C0F4997696
-:102020008839C0C1987718EA828E359C7B9E780EDD
-:102030008E1408EE029E7A8E301CEA7177E73088A3
-:102040003289339C7C9F7D0E9C4006CC118F2B29BE
-:1020500076132D76112876120CAA0218EA68C1C9E7
-:102060000CAA022A761008FF029F7EC0AA60000117
-:10207000C0A6A4BC0CB911A6992892852DC4CF087E
-:10208000A80B289685655100C020D10F2B200C0C81
-:10209000B7110677082A72860CB90A6FA902600187
-:1020A000182992A36890082A220009AA0C65A109A0
-:1020B0002A728564A1032C203D0C2C4064C08C8CBA
-:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
-:1020D0001464C0777CF374283013C0FC78F86CC0AB
-:1020E00090292467090C4765C0D719EA4718EA45C3
-:1020F0008F208C3508FF110C8C1408FF0288E49F98
-:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
-:102110008809880298A218EA3DA4BC2F72852DC4B4
-:10212000CF2FFC102F76852F210229207208FF0265
-:10213000B2992924722F2502C020D10F00CC57DA82
-:1021400020DB308C105812C8C020D10FC09163FF23
-:102150008FDA20C0B658135663FFE100DA20581317
-:102160005463FFD82B21045811E61EEA152B200CCE
-:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
-:10218000DD505813DDD2A0D10F2A2C748B10580BC0
-:10219000BED2A0D10F292138C08879832E8C310C72
-:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
-:1021B000EA048F3A2B200C63FE0DDA2058133C639F
-:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
-:1021D0002104B1CC0C0C472C24665811C91EE9F817
-:1021E0002B200CC0D02D24668F3A63FDDA0000004E
-:1021F0006C10089514C061C1B0D9402A203DC04080
-:102200000BAA010A64382A200629160568A8052C9D
-:10221000ACF965C33F1DE9EA6440052F120464F27E
-:10222000A02621021EE9E606064C6562E615E9E2F3
-:102230006440D98A352930039A130A990C6490CCEA
-:102240002C200C8B139C100CCC11A5CC9C112CC2F7
-:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
-:10226000A368E0098620D30F0E660C6562C2881150
-:102270002882856482BA891364905EDA80D9308CB2
-:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
-:102290007FB718B88A293C10853608C6110E660229
-:1022A0009681058514A5D50F550295800418146DE7
-:1022B0008927889608CB110888140EBB02A8D82954
-:1022C0009C200F88029BA198A088929BA308881449
-:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
-:1022E000131EE9BD86118D10286285AEDD08FF0B37
-:1022F0002CD4CF2821022F66858B352A207209889D
-:1023000002ABAA2825022A2472C020D10F29529E8E
-:1023100018E9A96F980260020B28822668800829B4
-:10232000220008990C6591FC2A529DC1CE9A126434
-:10233000A1F22B200C2620060CB8110588082D824E
-:10234000860EBE0A7DC3026002052EE2A368E00885
-:102350002F22000EFF0C65F1F6288285D780DE80E3
-:102360006482009816266CF96462012C206688311C
-:102370002CCC010C0C472C24666EC6026001BC08F4
-:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
-:10239000048B2D2830102F211D0C88100BFB090AEF
-:1023A00088020988020CBB026441529B709D71989F
-:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
-:1023C0007FD714273C10BCE92632168C3996E69C40
-:1023D000E78A37B4382AE6080B131464304A2A8295
-:1023E0001686799A9696978C778A7D9C982B821779
-:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
-:10240000086DB9218BC996A52692162AAC18B899E1
-:102410009BA196A08BC786CD9BA22B921596A49BC1
-:10242000A386CB2CCC2026A605C0346BD4200D3B34
-:102430000C0DD8090E880A7FB705C0909988BC8812
-:10244000C0900B1A126DAA069988998B288C18C017
-:10245000D01BE97A1CE97916E96EB1FF2A211C2309
-:10246000E6130F0F4F26E6122F251D7FA906C0F099
-:10247000C08028251D05F6111AE9678F202BE61567
-:102480002CE6162DE61726E6180AFA022AE6142983
-:102490002006299CF96490F829200C8D14C0801A1C
-:1024A000E94E0C9C11AA99A5CCDA202BC285289460
-:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
-:1024C000D10F8A356FA546D8308BD56DA90C8A8679
-:1024D0000A8A14CBA77AB335288C10C080282467C9
-:1024E000080B4765B10BDA20DB302C12055811DEE2
-:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
-:102500006461059B709D719872C04D63FEA4C0818B
-:1025100063FFC9008814CC87DA20DB308C15581192
-:10252000D2C020D10FDA20C0B658126163FFE40098
-:1025300000DA208B1058125E63FFD8009E178A12B3
-:102540002B21045810EF8E17C09029246663FE34A7
-:10255000C08063FE06DA20DB308C15DD505812E6B1
-:10256000D2A0D10FDA2058125263FFA7002B2138D6
-:10257000C0A87BAB026001048C310CFC5064CE041B
-:102580008A122B2104C0C098175810DD8E1763FDE6
-:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
-:1025A00028206A7F87050826416460A3C09016E949
-:1025B000141CE9232A200723E61BB1AA0CFD0226DE
-:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
-:1025D0001C8B260A0A472BE6208B282AE53E2BE691
-:1025E000212924072820062A2064688346B44463EE
-:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
-:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
-:1026100032162B76129D712D761328761489960A20
-:102620002A14AA990C9902997069ED71C14663FD4B
-:102630008100000064AFB51DE8E22C20168DD20A9F
-:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
-:102650002B21046EB81E2C2066B8CC0C0C472C2401
-:1026600066C9C09E178A125810A68E17C0348F20D4
-:10267000C0D02D2466C06826240663FF2E8A122B44
-:1026800021042C20669817B1CC0C0C472C246658DA
-:10269000109C8E178716C0D02D246663FCE68D35FE
-:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
-:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
-:1026C0001027D63006460127D6320A440117E8DF24
-:1026D00024D631A74727D63324F21596B794B68D62
-:1026E000C3BCBB9DB58D35299C107D83C22F211D98
-:1026F000C14663FD330000006C1006292006289CAB
-:10270000F86582BF2921022B200C09094C6590E154
-:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
-:102720000260028C19E8A609B90A2992A3689007E9
-:102730008C2009CC0C65C27829A2856492722D6226
-:102740009E1AE89C6FD80260026E2AA22629160102
-:1027500068A0082B22000ABB0C65B25C29629DC1EF
-:102760008C6492542A21200A806099102C203CC746
-:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
-:10278000260DBD112DDC1C0D0D410EDD038E27B174
-:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
-:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
-:1027B000A16000093E01073EB1780987390B770A0D
-:1027C00077EB0260020A2C2123282121B1CC0C0CCA
-:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
-:1027E000DB30581095292102CC96C0E80E9E022EAF
-:1027F0002502CC57DA20DB30DC4058111BC020D139
-:102800000F2C20668931B1CC0C0C472C24666EC687
-:10281000026001D309FD5065D1CD2F0A012E301180
-:1028200029221464E01128221B090C4400C1040071
-:10283000FA1A0A880228261B2E3010C0A0C0B094B5
-:102840001295131CE85F88302CC022088D147787FE
-:1028500004C0F10CFA38C041C0F225203CC0840805
-:1028600058010F5F010F4B3805354007BB10C0F012
-:10287000084F3808FF100FBB0228ECFEC0F0084FCD
-:1028800038842B0BA8100AFF102A21200F88020B76
-:10289000880208440218E86E8F1108440228212596
-:1028A0000A2A140828140488110A88022A21049488
-:1028B000F08B2004E41008BB1104BB02C04A04BB27
-:1028C000029BF1842A08AB110BEB0294F40A541119
-:1028D0000B44020555100D1B4094F707BB100B5518
-:1028E00002085502C08195F68433C05094F3B19428
-:1028F0008B3295F898F99BF2C080C1BC24261499BC
-:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
-:10291000FE883998FF853525F6108436851324F610
-:10292000118B3784122BF612C0B064C07E893077C9
-:1029300097438D3288332E30108F111CE83109995E
-:10294000400699112CF614C0C42CF6158C2B2DF6CC
-:102950001A28F61B2BF61904A81109880208EE02A2
-:1029600019E827C18008EE0209C90229F6162EF6D9
-:1029700018C09E600001C09A2F200C18E8170CFEAA
-:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
-:1029900085C87F8A268929A7AA9A260A990C090937
-:1029A00048292525655050C020D10F00C09A63FFEB
-:1029B000C6DA2058113F63FE38DA20C0B658113C01
-:1029C00063FE2E0068973C2B9CFD64BE24C020D182
-:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
-:1029E000DC3865CDE063FE098A102B2104580FC442
-:1029F000C0B02B246663FE21DB402A2C745809A248
-:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
-:102A100020D10F006C1004290A801EE80E1FE80E5A
-:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
-:102A3000029ED19CD0C051C07013E80A14E8091856
-:102A4000E8072AB285A82804240A234691A986B853
-:102A5000AA2AB685A98827849F25649FD10F0000E4
-:102A60006C100AD630283010292006288CF9648290
-:102A70009B68980B2A9CF965A1B2022A02580FABF9
-:102A800089371BE7CFC89164520E2A21020A0C4CE9
-:102A900065C2588D3019E7C874D7052E212365E229
-:102AA0009E2F929E1AE7C46FF8026002532AA22654
-:102AB00068A0082C22000ACC0C65C2442A929D64AE
-:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
-:102AD00018E7BC64B0052880217B8B432B200C18A1
-:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
-:102AF0002EE2A368E0052F22007EF9372CC2859CC8
-:102B00001864C2332B212F87660B7B360B790C6F31
-:102B10009D266ED2462C203D7BC740CE5560001EC0
-:102B20002A200CC1B28C205811229A1864A2458D1B
-:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
-:102B4000E06000022E60030EDB0C6EB20EDC700C37
-:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
-:102B6000C1C82D21205810BC8C268B279A160CBB6F
-:102B70000C7AB3348F18896399F3886298F28E6562
-:102B80009EF82D60108A189D1768D729C0D09DA97E
-:102B90002C22182B22139CAB9BAA97A58E667E73C2
-:102BA00002600097CF5860001FDA208B1658108201
-:102BB00065A13863FFBDC081C0908F18C0A29AF98B
-:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
-:102BD0001026C051D6A0C0C02BA0102CA4039B1758
-:102BE0002C1208022A02066B02DF702D60038E177A
-:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
-:102C0000188C148B16ACAC2C64038A268929ABAAC9
-:102C10000A990C9A26886609094829252507880CEF
-:102C200098662F2218A7FF2F261863FE96DA20DB5E
-:102C300030DC40DD50581130D2A0D10FC0302C20F4
-:102C4000668961B1CC0C0C472C24666EC60260000C
-:102C5000D2C03009FD5065D0CA8E6764E0696470E7
-:102C600066DB608C18DF70DA202D60038E170CDDB8
-:102C7000119E10AD6D2DDC201EE7755800F923263E
-:102C800018DA208B16DC402F2213DD50B1FF2F26DF
-:102C900013580FC5D2A0D10F0028203D0848406529
-:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
-:102CB00014CACF7CD32D2AAC10C090292467090DEB
-:102CC0004764DDC5600092002C1208066B022D6C73
-:102CD00020077F028E17DA209E101EE75C58007DC9
-:102CE00063FF9A00C09163FFD1000000655081DA54
-:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
-:102D00000FDA20C0B658106A63FFE000006F95022A
-:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
-:102D2000D2A0D10F8A152B2104580EF52324662832
-:102D30006010981763FF2100DA2058105D63FFAB25
-:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
-:102D50009409A90229250263FF91DB30DC40DD5094
-:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
-:102D70000FC020D10FDA202B200C58107263FF6B8C
-:102D80006C1004282006C062288CF8658125C0508C
-:102D9000C7DF2B221BC0E12A206B29212300A104BD
-:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
-:102DB0002A246B04E4390DCC030CBB012B261B64C5
-:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
-:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
-:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
-:102DF000E71E64B09B8C2B2421040DCC029CB08870
-:102E000020C0C50888110C880298B1882A0844118E
-:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
-:102E20000E9E0825E4CF2DDC282DA6852921020938
-:102E3000094C68941A689820C9402A210265A00BA1
-:102E40002A221E2B221D7AB10265A079C020D10F43
-:102E50002C212365CFDE6000082E21212D21237E29
-:102E6000DBD52B221E2F221D2525027BF901C0B0A8
-:102E700064BFC413E6D02CB00728B000DA20038862
-:102E80000A28824CC0D10B8000DBA065AFE763FF4E
-:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
-:102EA000A08B2008BB1106BB029BA1893499A263A9
-:102EB000FF790000262468DA20DB30DC40DD505842
-:102EC000108ED2A0D10FDA202B200C580FF9C02081
-:102ED000D10F00006C1006073D14C080DC30DB40D1
-:102EE000DA20C047C02123BC3003283808084277C5
-:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
-:102F0000D30F6DDA0500508800308CC0E0C020255A
-:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
-:102F2000220F8940941077F704C081048238C0F1E1
-:102F30000B2810C044C02204540104FD3802520181
-:102F400002FE3808DD10821C07EE100E6E020EDD48
-:102F500002242CFEC0E004FE380AEE100E88020D9A
-:102F600088028DAB1EE69B08D8020E880298B0C07E
-:102F7000E80428100E5E0184A025A125084411084C
-:102F80004402052514045511043402C0810E8E3903
-:102F900094B18FAA84109FB475660C26A11FC0F24D
-:102FA000062614600009000026A120C0F20626149F
-:102FB0000565020F770107873905E61007781008C5
-:102FC000660206550295B625A1040AE611085811B5
-:102FD00008280208660296B7C060644056649053A1
-:102FE000067E11C0F489C288C30B340B96459847FE
-:102FF000994618E6829F410459110E99021FE680F6
-:10300000020E4708D80298420E99029F40C1E00E76
-:10301000990299442FA00CB4380CF91114E66F1ED4
-:10302000E666A4FFAE992E928526F4CF0E880B2873
-:103030009685D10F2BA00C1FE6601CE6670CBE1115
-:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
-:10305000D10FC08005283878480263FEA263FE962F
-:103060006C1006C0C06570F18830C03008871477D6
-:103070008712C0B0C0A619E652299022C030CC9762
-:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
-:103090008225203C0B3F109712831CC070085801FA
-:1030A0000D5D01089738C0800B98380777100488A9
-:1030B00010086802087702C0800D98382D3CFE0881
-:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
-:1030D0000CB8100FDD02053B400EDD029D4089203B
-:1030E000043D100899110D99022D210409A9020827
-:1030F000DD119941872A05B9100D3D020ABB110D5A
-:10310000BB02087702974428212587120828140457
-:103110008811071E4007EE100E99027566092621D8
-:103120001F062614600006002621200626140868C3
-:10313000029B47098802984629200CD2C0C0800C07
-:103140009E111BE6251FE61CAB99AFEE2DE28528EC
-:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
-:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
-:10317000981008770C9FD898D989538F5299119934
-:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
-:1031900088108D1108E70C9751AD8DD7F078DB01C1
-:1031A000B1F79D5397528830C03008871408884083
-:1031B000648ED565BEC963FEBC0000006C1004D7E8
-:1031C00020B03A8820C0308221CAA0742B1E2972F8
-:1031D000046D080FC980C9918575B133A2527A3B3D
-:1031E0000B742B0863FFE900649FECD10FD240D130
-:1031F0000F0000006C100AD6302E3027D950DA406C
-:1032000015E5F02430269A1529160464E00264932B
-:10321000732920062A9CF865A3CE2A2102270A04D6
-:103220000A0B4C65B3978C3074C7052D212365D4E8
-:10323000A0C0A62B0A032C2200580F3664A3B9178E
-:10324000E5DE8E389A1664E3BA2F6027285021C92C
-:10325000F37E8311C2B08C202A200C580F55D7A0C2
-:10326000CDA16004A200C2B08C202A200C580F29E6
-:10327000D7A064A4862F212E8B680FBF360FB90C00
-:103280006F9D54296027D5B06E920528203D7B8F15
-:103290004CDA20DB50C1C42D211F580EEF8B269A2B
-:1032A000189A1989272AAC380B990C7A9353896399
-:1032B000C08099738F6298789F728E659E798D67B2
-:1032C0009D7B8C6695759C7A8E687E53026000B1FA
-:1032D0008B1465B050600038DBF063FFA5008A14E2
-:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
-:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
-:10330000FFE2DA208B18580EAC65A2B163FF9E0075
-:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
-:103320002D16042CA403DC70DA20DB60DF502D6046
-:1033300003C0E09E109D171EE5B90CDD110D6D0850
-:103340002DDC285BFF478E668F678817AF5FA8A8C4
-:1033500028640375FB01B1EE8A189E669F67892673
-:103360008829AA9909880C99268E6808084805EECC
-:103370000C28252515E5939E6865EECC63FEE600D6
-:103380000000C9432F21232B21212FFC010F0F4FB8
-:103390002F25237FBB026003142C20668961B1CCEA
-:1033A0000C0C472C24666EC60260022809FD50658D
-:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
-:1033C00020DB601EE5AB2D6003C08098100CDD1182
-:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
-:1033E0008C202A200C580ECB0AA70265A00FC0B073
-:1033F0002C22002A200C580EC7D7A064AFEFDA2089
-:10340000C1BCC1C82D21208F188E268929AFEE9E00
-:10341000260E990C090948292525580E8FC090C001
-:1034200050C0C288609A191EE566C0A12EE022082D
-:103430008F14778704C0810E8938C0800B93102DBC
-:10344000203C2921200CDC0104DB010929140BA8F4
-:10345000380CA5380D3D401CE57E8B2B08881007E5
-:1034600055100855020533022821250F154003BBCE
-:10347000020CBB0207551005D3100828140ADD11F1
-:103480000488110988020533022921040833029BAC
-:1034900070C0808A201BE57708AA110BAA029A71D6
-:1034A000C0A1852A9376957408931103DD020ADD85
-:1034B000029D778C63C1DC9C738B6298789A799BB0
-:1034C00072232214C0C0B1352526149C7B9D7593B0
-:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
-:1034E00025621B957F2362172376102D62182D7697
-:1034F000112C62192C761264E0B98E6077E73DC01A
-:10350000FE13E53E1DE53FC1818A628B6304951180
-:103510000E9C4006CC110C5502247615085502C0AD
-:10352000802D76148D2B2B761B2A761A287619255A
-:10353000761803DD022D76166000030000C0FA2E17
-:10354000200C19E52518E51CA9E90CEE11A8EEC020
-:10355000802DE2852894CF0DFD0B2DE685DA208B9A
-:10356000198C158D14580D90D2A0D10FDC70DF503E
-:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
-:103580005563FE53002B203D0B4B4065BC826FE51D
-:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
-:1035A000F3162AAC10C090292467090F4764FC6009
-:1035B00060015F00C0FA63FF85C09163FFE8881473
-:1035C000658168DA20DB608C15580DA7C020C0909B
-:1035D00029A403D10F8A162B2104580CC9C0A02A94
-:1035E00024668E6863FDCA00002B9CF965B0FDDA85
-:1035F00020580CCE63FC220000DA20C0B6580E2CF6
-:1036000063FFBA002B200C0CBE11A7EE2DE286C181
-:10361000C27DC30260011819E4E909B90A2992A31D
-:103620006890082A220009AA0C65A10326E2856495
-:1036300060FD2C20668931B1CC0C0C472C24666FC0
-:10364000C60270960C8A162B2104580CADC0D02DE2
-:1036500024668E3077E74D1CE4E91BE4E98F32885D
-:1036600033C0A42D21040E994006991104DD1109DF
-:10367000DD029A61C19009DD029B60C0908B2B9D99
-:10368000649F66986799650CBB029B6228200C1AA0
-:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
-:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
-:1036B0008B142C2523C8B7022A02066B02580CDE95
-:1036C0002A210265AEF7C0D80DAD022D250263FE9A
-:1036D000EC008E14C8E8DA20DB30580CD72A21021F
-:1036E00065AEDA07AF022F250263FED100DA20DBD8
-:1036F000308C158D14580E80D2A0D10FDA202B20DB
-:103700000C580DEB63FEB600DA202B200C580E0D82
-:1037100063FEAADA20DB308C152D12042E0A8028D5
-:103720000A00282468580CD663FAE500C020D10F9F
-:10373000DA20580DDF8914CD92DA20DB308C155851
-:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
-:103750000F2A2C748B1558064CD2A0D10F000000F4
-:103760006C100E28210224160108084C6583A91F3D
-:10377000E49229F29E6F98026003AD1EE48E29E266
-:10378000266890082A220009AA0C65A39B24F29DB2
-:103790006443952A31160A4B412B240BB4BB0B0B07
-:1037A000472B240C0CB611AF66286286C1CC78C3B7
-:1037B0000260037F19E48209B90A2992A36890077D
-:1037C0008C2009CC0C65C36B276285647365293135
-:1037D00009C0D02D24668C3599139C2A88369C14F8
-:1037E000982B8E3798159E169E2C8C38C0E10C5C59
-:1037F000149C179C2D88392925042E251D28251C4D
-:103800002C3028C0822C243C2930290C0C4708C8B5
-:103810000129243D29311598189912090841089960
-:103820000C299CEC29251F7EC725921C8212282A70
-:1038300000082060991B01023E00093EB128098260
-:1038400039891B0E221102990C821C29251F821C0A
-:10385000941D951E24211F15E4880451609A10C1FF
-:10386000802B1610252014961F05054301063E00E7
-:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
-:10388000441C893D2E26132E26142E26152E246B1D
-:1038900025241406D61CC05025261825261B2524B1
-:1038A000672524682832112525232525242525254B
-:1038B00025252C2925222D25202B252124252E26A2
-:1038C000252F14E46F16E46D1BE45298192D211C6A
-:1038D000C08498719B70892095759577957F967CAB
-:1038E000967E98799B7894731BE46714E4680C388F
-:1038F000400288100C064015E464016610947D9B1C
-:1039000074841D1BE444086602957B18E431851E0F
-:103910000B99029972997A0866022B121096768694
-:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
-:1039300000A10400E81A7D8B0263FFEE891AC0E043
-:10394000961F1DE43E2B1610951E941D28203D2920
-:10395000761A297612C040C051C0B22D76130806DF
-:10396000408D170B8801065E380AEE101BE44A08EA
-:103970005438B0A609661188140B44102B761B042A
-:10398000EE028B1614E44308DA1406EE020D8810DA
-:103990002A761E86131AE41C04EE020D66110866D0
-:1039A000022E76160D14141EE41A0D44110BD814B1
-:1039B0000866020A44022E76182E76102476172600
-:1039C000761FC084287619287611C76F0C24400F03
-:1039D00044111CE3FB26761D26761C2676152676DA
-:1039E000148A262676242676252976222E762028E5
-:1039F00076218E1888150DB91016E4278BC70D880F
-:103A0000110E5E39ADBB851904EE022676230988B6
-:103A100002861F89102876260A04480544110505E8
-:103A2000480E551105440204EE02851E841D2E76B3
-:103A3000272820069B2D29246A2E31172B12102EA1
-:103A40002538CC83C0D02D2407C0D7090840648016
-:103A50008E9A290928416480AA64E0B42D2406C006
-:103A60009809E9362D0AA02A628501C404ADAA2D61
-:103A700021042A668508DD11883F8E3E2732100812
-:103A8000EA1800C40408E8180088110ECE5308771D
-:103A900002C08308DD029D4118E401090D4E9840E3
-:103AA00088209A4397449D4517E3FE1DE3CB058884
-:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
-:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
-:103AD00097CA28A4A268711C655060C020D10F004D
-:103AE0002D2406C080C09809E9360E893863FF731B
-:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
-:103B0000D600000065EF54C098C0D82D240663FF8E
-:103B1000522D2406C09063FF4ACC57DA20DB308C4C
-:103B200011580C51C020D10F00DA20C0B6580CE05B
-:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
-:103B400011580551D2A0D10F6C10062820068A33D7
-:103B50006F8202600161C05013E39729210216E3CE
-:103B600096699204252502D9502C20159A2814E331
-:103B7000948F2627200B0AFE0C0477092B712064F2
-:103B8000E1398E428D436FBC0260016F00E104B0E9
-:103B9000C800881A08A80808D80298272B200668A9
-:103BA000B32ECE972B221E2C221D0111027BC901A0
-:103BB000C0B064B0172CB00728B000DA2003880A20
-:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
-:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
-:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
-:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
-:103C0000007E894F29C2851EE37E6490461FE38CA7
-:103C10009E90C084989128200A95930F88029892CC
-:103C20008E200FEE029E942F200788262F950A984B
-:103C3000969A972E200625240768E3432921022A15
-:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
-:103C500063FF4E002E2065CBEDC082282465C9F697
-:103C600005E4310002002A62821BE36D2941020B48
-:103C7000AA022A668209E43129210263FF23000097
-:103C800064DFB88F422E201600F1040DEE0C00EE1A
-:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
-:103CA000B0293221283223B4992936217989A92BC8
-:103CB00032222B362163FFA0C020D10F9F2725245D
-:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
-:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
-:103CE000D0868E290EAE0C66E089C0F128205A28B5
-:103CF0008CFE08CF3865FEE863FF580000E00493AF
-:103D000010C0810AF30C038339C78F08D80308A8B1
-:103D10000108F80C080819A83303C80CA8B82875BE
-:103D200020030B472B24158310CBB700E104B0BC54
-:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
-:103D4000990209094F29250263FE50002D206A0DB2
-:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
-:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
-:103D7000FF79000028221F658E2763FF6E25240629
-:103D800029210263FE1B00006C10066571332B4C69
-:103D900018C0C7293C18C0A1C08009A8380808422B
-:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
-:103DB0000F6DAA0500B08800908C8940C0A00988CA
-:103DC000471FE32B080B47094C50090D5304DD1026
-:103DD000B4CC04CC100D5D029D310CBB029B30882D
-:103DE000438E2098350FEE029E328D26D850A6DDE8
-:103DF0009D268E40C0900E5E5064E0971CE3111E1D
-:103E0000E300038B0BC0F49FB19EB02D200A99B341
-:103E10000CDD029DB28F200CFF029FB48E262D2058
-:103E2000079EB68C282DB50A9CB72924072F20069B
-:103E30002B206469F339CBB61DE2E22320168DD224
-:103E40000B330C00D10400331AB48DA3C393292281
-:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
-:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
-:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
-:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
-:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
-:103EA000AFEEACBB22B28529E4CF02820B22B685ED
-:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
-:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
-:103ED00018DD50580A9B8940C08063FEE3066E02DD
-:103EE000022A02DB30DC40DD505800049A10DB501F
-:103EF000DA70580465881063FEF700006C100692B3
-:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
-:103F10000BD9A07DA30229ADF875C302600084C04F
-:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
-:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
-:103F4000D8F000808800708C879068B124B2227706
-:103F5000D3278891C0D0CB879890279C1000708879
-:103F600000F08C9D91CB6FC08108BB0375CB36638D
-:103F7000FFB4B1222EEC1863FFD485920D770C8626
-:103F8000939790A6D67D6B01B1559693959260005C
-:103F900016B3CC2D9C188810D9D078D3C729DDF85A
-:103FA00063FFC100C0238A421BE2C000CD322D4412
-:103FB000029B3092318942854379A1051EE2BC0EF5
-:103FC000550187121BE2AB897095350B9902993226
-:103FD00088420A880C98428676A6A696768F44AFC9
-:103FE000AF9F44D10F0000006C10089311D63088A9
-:103FF00030C0910863510808470598389812282165
-:1040000002293CFD08084C6581656591628A630A56
-:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
-:104020005ACCC42D0A022D245A7FE0026002158961
-:104030002888261FE29F09880C65820F2E200B0F0F
-:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
-:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
-:1040600080084837B88DB488981089601AE2557B6B
-:1040700096218B622AA0219C147BA3179D132A20D2
-:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
-:104090006001C4002E200C1BE2480CEA110BAA0898
-:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
-:1040B000F0052822007F892C2BA28564B0AA876294
-:1040C0008826DE700C7936097A0C6FAD1C8F279B21
-:1040D0001508FF0C77F3197E7B729D139C149B15BA
-:1040E000CF56600025C0B063FFD0D79063FFDD00DE
-:1040F000009D139C14DA20DB70580B2F8B158C1449
-:104100008D1365A06A8E6263FFCC00DA208B11DC10
-:1041100040580AD5D6A08B15C051DE70DA20DC607D
-:10412000DD405BFF768D138C14D9A02E200C1BE292
-:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
-:104140002EF4CF0B990B29A68563FF1D00DA20DC26
-:1041500060DD40DE708912282007DF50A9882824FE
-:10416000075BFF09D2A0D10F00DBE0DA20580B502B
-:104170006550EF2A20140A3A4065A0EBDB60DC4072
-:10418000DD30022A025809BCD6A064A0D584A183E0
-:10419000A00404470305479512036351C05163FE11
-:1041A0005C2C2006D30F28CCFD6480A568C704C012
-:1041B000932924062C2006C0B18D641FE2019D279F
-:1041C0009D289D298FF29D2600F10400BB1A00F066
-:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
-:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
-:1041F000BB36C0E20B0B470EBB372B241618E1F978
-:104200000A09450D0B422B240B29240AB4BE2E2487
-:104210000C7D88572920162FCCFDB09D0A5C520DCD
-:10422000CC362C246465FDEC0C0C4764CDE618E11B
-:10423000E48E2888820C9F0C00810400FF1AAFEEE8
-:104240009E2963FDCF1CE21163FE13001CE20B6389
-:10425000FE0C8D6563FFA500DA202B200C580B396E
-:10426000645F0FC020D10F00C020D10FC09329245C
-:1042700016C09363FFA000006C1004C06017E1CD6E
-:104280001DE1D0C3812931012A300829240A78A1EF
-:1042900008C3B27BA172D260D10FC0C16550512654
-:1042A00025022AD0202F200B290AFB2B20142E2098
-:1042B0001526241509BB010DFF0928F1202B241414
-:1042C000A8EE2EF52064A0A92B221E28221D011184
-:1042D000027B8901DB6064B0172CB00728B000DADC
-:1042E0002007880A28824CC0D10B8000DBA065AF74
-:1042F000E7DB30DC40DD50DA205800DE29210209FE
-:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
-:10431000372ED02064E02D022A02033B02DC40DD70
-:10432000505800D4D2A0D10F2B2014B0BB2B241492
-:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
-:1043400002D2A0D10FC020D10F2E200669E2C126D3
-:1043500024062B221E2F221D29200B2820150D9903
-:10436000092A9120262415AA882895207BF14960E6
-:104370000048B0BB2B24140B0A4164A0627CB70236
-:104380002C25022B221E2C221DD30F7BC901C0B06D
-:10439000C9B62CB00728B000DA2007880A28824C5A
-:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
-:1043B000262406D2A0D10F0000DB601DE18164BF7E
-:1043C0004F2CB00728B000DA2007880A28824CC09A
-:1043D000D10B8000DBA065AFE71DE17963FF310001
-:1043E00026240663FF9C00006C1004282006260A81
-:1043F000046F856364502A2920147D9724022A02C1
-:10440000DB30DC40DD50580019292102090A4CC874
-:10441000A2C020D10FC0B10B9B022B2502C020D11E
-:104420000F00022A02033B022C0A015800D1C9AA3C
-:10443000DA20DB30DC40580A0C29A011D3A07E978B
-:10444000082C0AFD0C9C012CA411C0512D2014062F
-:10445000DD022D241463FFA4DA20DB30DC40DD50C4
-:10446000C0E0580987D2A0D10F0000006C100616DA
-:10447000E1521CE152655157C0E117E14E2821027B
-:104480002D220008084C6580932B32000B695129BE
-:104490009CFD6590872A629E6EA84C2A722668A0B1
-:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
-:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
-:1044C0002FF2A368F0052822007F89072DD285D31B
-:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
-:1044E00020580883600035002D21041BE1757DBB39
-:1044F00024DA20C0B658087ECA546001030B2B5042
-:104500002B240BB4BB0B0B472B240C63FFA0DA202E
-:10451000580A67600006DA20C0B6580A656550E0A0
-:10452000DC40DB302D3200022A020D6D515808D2DA
-:104530001CE123D3A064A0C8C05184A18EA00404B0
-:10454000470E0E4763FF3500002B2104C08B8931D5
-:10455000C070DF7009F950098F386EB8172C2066CB
-:10456000AECC0C0C472C24667CFB099D105808E44B
-:104570008D1027246694D11EE126B8DC9ED06550AC
-:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
-:10459000F119E10518E10728967EB04BD30F6DBAEB
-:1045A0000500A08800C08C2C200CC0201DE10B0C45
-:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
-:1045C000F685D10FC0800AB83878D0CD63FFC1001E
-:1045D0008E300E0E4763FEA12A2C742B0A01044D67
-:1045E000025808D72F200C12E0FC0CF911A699A252
-:1045F000FF27F4CF289285D2A008480B289685D1B2
-:104600000FC020D10F0000006C1004C060CB55DB40
-:1046100030DC40055D02022A025BFF942921020979
-:10462000084CC882D2A0D10F2B2014B0BB2B24146D
-:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
-:10464000D2A0D10F0000022A02033B02066C02C076
-:10465000D0C7F72E201428310126250228240A0F5E
-:10466000EE012E241458010E63FFA300262406D267
-:10467000A0D10F006C1006282102D62008084C6536
-:10468000809D2B200C12E0CC0CB811A2882A8286C7
-:10469000B5497A930260009719E0C909B90A2992CD
-:1046A000A36890082A620009AA0C65A08228828566
-:1046B0001CE0D46480799C80B887B14B9B819B10AF
-:1046C000655074C0A7D970280A01C0D0078D380D75
-:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
-:1046E0000F6D4A0500808800908C2E3008C0A00015
-:1046F000EE322E740028600C19E0B80C8D11A2DD8A
-:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
-:10471000D685D10FC0F0038F387FA0C063FFB400EF
-:10472000CC582A6C74DB30DC4058080BC020D10F09
-:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
-:104740007058087F2E30088B1000EE322E7400282F
-:10475000600C19E0A10C8D11A2DDA988C0202CD21B
-:10476000852284CFD2A00CBC0B2CD685D10F0000A3
-:104770006C1004292014282006B19929241468817A
-:1047800024C0AF2C0A012B21022C24067BA004C0DC
-:10479000D02D2502022A02033B02044C02C0D0584D
-:1047A00000C0D2A0D10FC020D10F00006C1004298E
-:1047B0003101C2B429240A2A3011C28378A16C7B4A
-:1047C000A1696450472C2006C0686FC562CA572D86
-:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
-:1047E000292102090E4CC8E2C020D10FC0F10F9F51
-:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
-:10480000DC28201406880228241463FFC7292015F9
-:104810001BE06C2A200BC0C09C240BAA092BA120F2
-:104820002C2415AB9929A52063FF9900C020D10F36
-:10483000DA20DB30DC40DD50C0E0580891D2A0D156
-:104840000F0000006C1004CB5513E06725221F0DEC
-:10485000461106550CA32326221E25261F06440BAF
-:1048600024261E734B1DC852D240D10F280A80C087
-:104870004024261FA82828261E28261DD240D10FF6
-:10488000C020D10F244DF824261E63FFD80000005D
-:104890006C1004D620282006C0706E85026000D4FB
-:1048A0001DE04E19E04612E0442A8CFC64A1302B36
-:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
-:1048C000B8110288082E828609B90A7EC3026000E8
-:1048D0009A2992A368900509AA0C65A08E28828562
-:1048E000648088B8891BE04A94819B80655155C0DB
-:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
-:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
-:104910000500808800908CC0A029600C0C9C11A21E
-:10492000CC2BC285AD990B4B0B2BC6852860062777
-:1049300094CF6881222D6015D2A0C9D2C0E22E6426
-:1049400006D10F00C0F008AF387FB0BD63FFB100E3
-:10495000276406D2A0D10F00D2A0D10F00CC57DA25
-:1049600060DB30DC405808C0C020D10FDA60580945
-:104970005063FFE80028221E29221DD30F789901D9
-:10498000C080C1D6C1C11BE018C122AB6B6480429C
-:1049900078913F2A80000CAE0C64E0BB02AF0C643F
-:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
-:1049B000E864E0A32FACE764F09D2EACE664E097DA
-:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
-:1049D00065AFBC28612308D739D97060007B00001F
-:1049E0002B600C0CB811A2882C82862A0A087CAB9A
-:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
-:104A00002A828564A0691FDFFE276504C0E3C0C455
-:104A10002E64069CA11CE02B9FA02E600A97A30C7D
-:104A2000EE029EA28F600CFF029FA42E60147AEF0C
-:104A30004627A417ADBC2F828527C4CF2FFC202F7B
-:104A4000868563FE692A6C74C0B1DC90DD4058072E
-:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
-:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
-:104A7000DA6058090F63FEE4290A0129A4170DBF63
-:104A8000082E828527F4CF2EEC202E868564500BCD
-:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
-:104AA0006C10062B221E28221D93107B8901C0B09A
-:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
-:104AC0000747010E4E01AD2D9E11C0402E0A146401
-:104AD000B06E6D084428221D7B81652AB0007EA13E
-:104AE0003B7FA1477B51207CA14968A91768AA1484
-:104AF00073A111C09F79A10CC18B78A107C1AE2908
-:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
-:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
-:104B200089116987BB649FB863FFDC00647FB4634D
-:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
-:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
-:104B5000107CB1217AB901C0B0C9B913DF96DA204F
-:104B600028B0002CB00703880A28824CC0D10B80E3
-:104B700000DBA065AFE7D240D10F8910659FD463F9
-:104B8000FFF300006C1008C0D0C8598C30292102F6
-:104B90000C0C4760000C8E300E1E5065E19E2921E2
-:104BA00002C0C116DF85090B4C65B0908A300A6ED1
-:104BB0005168E3026000852F629E1BDF7E6EF85312
-:104BC0002BB22668B0052E22007BE94727629DB7ED
-:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
-:104BE000F2869E12798B4117DF7507B70A2772A3E9
-:104BF000687004882077893029F285DF90D7906526
-:104C000090652A210419DFAE7A9B22DA205806B873
-:104C1000600029002C21041BDFAA7CBB18DA20C00D
-:104C2000B65806B3C95860014CC09063FFCCDA2077
-:104C300058089F600006DA20C0B658089D655135B7
-:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
-:104C5000D3A064A120292102C05184A18CA0040406
-:104C6000470C0C4763FF3E00C09B8831DBD008F83F
-:104C700050089B3828210498116E8823282066ACA0
-:104C80008C0C0C472C24667CBB159F139E148A1039
-:104C90008B1158071B8E148F13C0D02D24668A30B9
-:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
-:104CB0000827FC106550A4B83ADF70C051C08007C7
-:104CC000583808084264806718DF3819DF392986A8
-:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
-:104CE000A08930B4E37F9628C0F207E90B2C940822
-:104CF0009B909F912F200C12DF380CF811A6882969
-:104D00008285A2FF2DF4CFD2A009330B238685D153
-:104D10000F22200C891218DF300C2B11A6BBA82201
-:104D20002D24CF2CB285D2A00C990B29B685D10F9A
-:104D3000C087C0900A593879809663FF8ADB30DAE1
-:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
-:104D500065AE4D2D2502C09063FE45009E142A2CA1
-:104D600074C0B1DC70DD405806F68E14C0D01BDF75
-:104D700028C1C863FF6AC020D10F00006C1006284C
-:104D8000210217DF0D08084C65824929729E6F9831
-:104D90000260025019DF082A922668A0078B200AB9
-:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
-:104DB000C0602B3008C0F164B0712E0AFFB0B86437
-:104DC00081512DBCFE64D0F364505C2A2C74044BDA
-:104DD000025800AD0AA2020600000000001ADF0817
-:104DE0002C20076EBB0260022218DEFE13DF081BB8
-:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
-:104E000099D223B08026B480B13308330293D318EB
-:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
-:104E2000EC0B2CD685655FA2C020D10F2B21048806
-:104E300031DE6008F85008CE386EB8102C2066B10C
-:104E4000CC0C0C472C24667CEB026001AF2E30109A
-:104E50002930112C301300993200CB3264E1452AFD
-:104E600030141EDF1A00AA3278CF050E9C092BC41D
-:104E70007F1CDF1766A0050E98092A8480B4A71846
-:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
-:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
-:104EA0003303AC882A848B2CD03627848C03CC0126
-:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
-:104EC000FB11A7BB2DB286C0987D9302600121190A
-:104ED000DEBB09F90A2992A36890082D220009DD9A
-:104EE0000C65D10C2DB285DE6064D10488312B2194
-:104EF0000408F85008CE386FB80263FEDF2C206635
-:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
-:104F100060013100293108292504283014B0886443
-:104F200080A62B31092B240AC0812B30162FD423C5
-:104F30002B240BB4BC2C240C8D378B36292504DE96
-:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
-:104F50009C1101C4048F380DBE1800C4040DB8188C
-:104F600000881108FF02C08308CC0218DECC9CA187
-:104F700098A018DECB8C209EA39FA405CC110BCF4C
-:104F800053C1E09EA50CFF0208FF029FA218DE8914
-:104F90002624662C729D2684A22CCC182C769D6328
-:104FA000FE250000002D30121CDECD00DA3278DF45
-:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
-:104FC0002A301100AA3263FEEC2E240A2B31099BF1
-:104FD0002B63FF5300CC57DA20DB30DC405807222C
-:104FE000C020D10F00DA20C0B65807B163FFE5003A
-:104FF00000DBF0DA205807AE63FFD9000058064006
-:105000001DDE70C0F126246663FE41008B20280A55
-:10501000FFB1CE23200A2C21040E0E472E24077840
-:1050200031359AD02CD50A96D319DEA62ED416C0C7
-:105030008398D1C0E309B80298D409390299D226DD
-:10504000240763FDC958062E8D102624662B2104E3
-:105050002F200C63FD86000008B81119DE6808EEE9
-:1050600002882B9ED59AD0C0EF09880298D204C935
-:10507000110E990299D4C0E49ED163FFC1000000D3
-:105080006C1004C020D10F006C100485210D381164
-:1050900014DE478622A42408660C962205330B935F
-:1050A00021743B13C862D230D10FC030BC29992182
-:1050B00099209322D230D10F233DF8932163FFE34F
-:1050C0006C100AD620941817DE3CD930B8389819DD
-:1050D0009914655256C0E1D2E02E61021DDE390EF0
-:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
-:1050F000F1558EE129D0230E8F5077E66B8F181E65
-:10510000DE78B0FF0FF4110F1F146590CE18DE7516
-:105110008C60A8CCC0B119DE2728600B09CC0B0D20
-:10512000880929812028811E2A0A0009880C08BACA
-:10513000381BDE6B0CA90A2992947B9B0260008CC1
-:105140002B600C94160CBD11A7DD29D286B84879C6
-:1051500083026000D219DE1909B80A2882A39817C1
-:105160006880026000A36000A51ADE5F84180AEE62
-:1051700001CA981BDE108C192BB0008CC06EB313C3
-:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
-:10519000AE6000380C0C5360000900000018DE51AE
-:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
-:1051B000880929812028811E2A0A0009880C08BA3A
-:1051C000380CA90A2992947E930263FF72DA60C0B8
-:1051D000BA58073764507360026A00001ADDF68C13
-:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
-:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
-:1052000063FFC9000C0C5363FF0989607899182962
-:10521000D285C9922B729E1DDDE76EB8232DD22652
-:10522000991369D00B60000DDA60580721600017F0
-:105230000088607D890A9A1A29729D9C129915CF5F
-:1052400095DA60C0B658071A6551F98D148C18DBD1
-:10525000D08DD0066A020D6D51580587D3A09A14DF
-:1052600064A1E182A085A1B8AF9F1905054702029C
-:10527000479518C05163FE602B6104C08B8931C013
-:10528000A009F950098A386EB81F2C6066A2CC0CB0
-:105290000C472C64667CAB119F119E1B8A15580528
-:1052A000988E1B8F11C0A02A64669F1164F0E58957
-:1052B0001388190FFD022E0A006DD9172F810300E4
-:1052C000908DAEFE0080889F9200908C008088B800
-:1052D0009900908C65514E8A10851A8B301FDDC85D
-:1052E000881229600708580A2C82942D61040ECC7C
-:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
-:105300005D50A29909094729C48065D0DA2E600C46
-:10531000C0D01FDDB10CE811AFEEA7882282852D29
-:10532000E4CF02420B228685D2A0D10F8E300E0E22
-:105330004763FDA2A29C0C0C472C64077AB6CD8B68
-:10534000602E600A280AFF08E80C64810E18DDDD73
-:1053500083168213B33902330B2C34162D350AC051
-:105360002392319F30C020923308B20208E80292A3
-:10537000349832C0802864072B600CD2A01CDD96C4
-:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
-:105390002DE685D10F8B1888138D30B88C0D8F4773
-:1053A0000D4950B4990499100D0D5F04DD1009FFEB
-:1053B000029F800DBB029B8165508D851AB83AC053
-:1053C000F1C0800CF83808084264806B1BDD771947
-:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
-:1053E000C08CC0A063FEF30082138B161DDD8828DD
-:1053F000600AC0E02EC4800D880202B20B99239F80
-:1054000020C0D298229D2122600CB2BB0C2D11A786
-:10541000DD28D28508BB0B18DD702BD685A8222E7F
-:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
-:10543000FF168E1B63FEA300C087C0900AF938795F
-:10544000809263FF86C020D10F9E1B2A6C74C0B16E
-:105450008D1858053B8E1B851A63FE7E886B821360
-:10546000891608BE110ECE0202920B9E25B4991E1B
-:10547000DD639F200E88029822C0EF04D8110E88A9
-:10548000029824C0E49E21C080D2A02B600C286426
-:10549000071CDD510CBE11A7EE2DE285ACBB28B474
-:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
-:1054B00020D10F006C10048633C071C03060000131
-:1054C000B13300310400741A0462017460F1D10F29
-:1054D0006C1004022A02033B025BFFF61CDD391B41
-:1054E000DD83C79F88B009A903098A019AB0798032
-:1054F0001EC0F00FE4311DDD300002002BD2821EF1
-:10550000DD7C2AC1020EBB022BD6820AE431D10F08
-:1055100028C102C19009880208084F28C50208E482
-:1055200031D10F006C1004C0C00CE43112DD251A1B
-:10553000DD2200020029A28218DD701BDD6E26210B
-:10554000020B990108660129A68226250206E4318C
-:1055500014DD6B15DD66236A9023261685502426FC
-:1055600015252617222C50D10F0000006C1008D6EC
-:10557000102B0A64291AB41ADD0F0D23111CDD103B
-:105580000F2511B81898130E551118DD5DAC55A8EC
-:1055900038AA332C80FF2A80FEA933288D01298068
-:1055A0000108AA112880000CAA02088811098802A3
-:1055B00008AA1C288C0828160458086814DD010A5B
-:1055C000A70224411A2A30802B120407AA2858085F
-:1055D00063B1338B13B4559A6004AC28B4662C566F
-:1055E0002B7B69E016DD3A9412C050C0D017DCF472
-:1055F0009D15D370D4102F60802E60829F169E1749
-:10560000881672891A8D128C402A607F0DCC282B47
-:105610003A200CAA28580851C0B10ABE372E354886
-:105620008F1772F91A8D128C402A60810DCC282BAD
-:105630003A200CAA28580849C0B10ABE372E354A6C
-:10564000B233B444B1556952B6B466C0508F15B880
-:1056500077D370B2FF9F156EF899D10F6C1004C00C
-:1056600021D10F006C1004270A001CDCD31FDCE4DE
-:105670001EDCE71DDCD01ADD141BDD22C02824B09F
-:10568000006D2A75AA48288080C09164806100411D
-:105690000415DCCBC03125503600361A06550105FD
-:1056A00095390C56110C66082962966E974D0D5966
-:1056B0000A29922468900812DD0602420872993B7A
-:1056C00023629512DCC8CB349F300282020E440262
-:1056D000C092993194329233AD52246295C0902495
-:1056E0004C1024669524B0002924A0AA42292480C5
-:1056F000B177B14404044224B400D10FD10FD10FCB
-:105700006C10041ADCAC2AA00058021C5BFFD50206
-:105710002A02033B025BFFD11BDCAAC9A12CB10208
-:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
-:10573000C0A00AE43118DCA00002002F828219DC2C
-:10574000B32EB10209FF022F86820EE431D10F0081
-:105750006C1004C02002E43114DC9A16DC970002BD
-:1057600000226282234102732F0603E431C020D15C
-:105770000F19DCE61ADCE52841020A2A0109880132
-:105780002A668228450208E43115DCDC12DCE125BA
-:105790004621D10F6C1004292006289CF96480A0B2
-:1057A0002A9CFD65A0968A288D262F0A087AD9049E
-:1057B0002B221FC8BD2C206464C0812E22090EAE8E
-:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
-:1057D000C28619DC7A78F3026000AD09B90A299211
-:1057E000A36890082E220009EE0C65E09B29C28573
-:1057F0001FDC846490929F90C0E41FDC919E9128EE
-:10580000200AC0E09E930F8802989288200F880299
-:1058100098942F20079A979D962F950A2E24072853
-:10582000200629206468833328C28512DC6B288C0B
-:1058300020A2B22E24CF28C685C020D10FC020D1EF
-:105840000F2A206A0111020A2A4165AF52DA20C0EC
-:10585000B05805EA64AFE5C021D10F00649FC81FAE
-:10586000DC582D20168FF209DD0C00F10400DD1A42
-:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
-:105880008C2028C685C020D10FC021D10F00000078
-:105890006C1004260A001BDC9F15DC4928206517C4
-:1058A000DC46288CFE6480940C4D110DBD082CD272
-:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
-:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
-:1058D000F52AD6F406E4310002002872822AFAFF83
-:1058E000004104290A012F510200991A0A9903095B
-:1058F00088012876820FE4312624652BD2F48E5C51
-:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
-:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
-:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
-:10593000020B0B4F2B55020BE431D10F00DB30DA99
-:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
-:10595000FFA8000006E4310002002F728218DC303C
-:105960002E510208FF022F76820EE431D10F000083
-:105970006C1004C03003E43116DC1015DC11000299
-:105980000024628274472118DC64875C084801287F
-:105990006682CD7319DC620C2A11AA99229283299E
-:1059A00092847291038220CC292B51020BE431C0E6
-:1059B00020D10F001FDC5B2E51020FEE012E55028D
-:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
-:1059D000561DD10F6C10061BDBF71EDBF922B00041
-:1059E0001ADC526F23721DDC39C04818DC511FDCF1
-:1059F0004FDC10D5C083F000808600508A6D4A4F7E
-:105A00000F35110D34092440800B560A296294B1D8
-:105A1000330E55092251480F44110C440A8740099E
-:105A2000A80C02883622514907883608770CA899B5
-:105A30002966949740296295874109A80C02883607
-:105A400007883608770CA899296695974103034281
-:105A5000B13808084298F0D10F1CDC3613DC372728
-:105A6000B0002332B5647057C091C0D016DC351534
-:105A7000DC33C0402AC00003884328C4006D793C51
-:105A8000004104B14400971A7780148E502FB295CC
-:105A90002DB695AFEE2EED2006EE369E5060001826
-:105AA00077A00983509D5023B69560000223B295DC
-:105AB000223D2006223622B695B455B8BBD10F0040
-:105AC00003884328C400D10F6C1004C04004E431A3
-:105AD00015DC1D000200885013DC1CCB815BFFBD70
-:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
-:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
-:105B0000990C0929146000050BA90C092914993076
-:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
-:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
-:105B3000D10F000004E4311EDBA00002002DE28240
-:105B40002FBAFF2C51020FDD012DE6820CE431D17A
-:105B50000F0000006C1004D10F0000006C1004C096
-:105B600020D10F006C100413DBFAC0D103230923EA
-:105B7000318FC0A06F340260008D19DB8F1BDB906A
-:105B800017DBF30C2811A8772672832572822CFA72
-:105B9000FF76514788502E7285255C0425768275E4
-:105BA000E9052572842576827659292E72842E760F
-:105BB000822E76830AE431000200239282002104BF
-:105BC0002FB10200D61A0C66030633012396820F0A
-:105BD000E43126728325728260000200D8A07659D3
-:105BE000220AE43100020023928200210400D21A2A
-:105BF0002FB1020C22030232012296820FE431D22D
-:105C000080D10F00D280D10FC020D10F6C1004DBE7
-:105C100030862015DB68280A00282502DA2028B003
-:105C2000002CB00705880A28824C2D0A010B800041
-:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
-:105C4000769101D10F2BA6A3D10F00006C1004C0D8
-:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
-:105C60007786758574C0A076516288508E77B4555A
-:105C7000957475E903857695747659278F769F75A7
-:105C80009F740AE431000200239282B42E2FB102E5
-:105C900000E10400D61A0C66030633012396820F36
-:105CA000E431867583747639280AE4310002002EC7
-:105CB0009282B42200210424B10200DF1A0CFF03F7
-:105CC0000FEE012E968204E431D280D10FD8A07657
-:105CD00051D6D280D10F00006C1004290A801EDB3F
-:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
-:105CF000B2850FCC029ED19CD0C051C07013DB592D
-:105D000014DB5818DB562AB285A82804240A234637
-:105D100091A986B8AA2AB685A98827849F25649F59
-:105D2000D10F00006C100419DB8B0C2A11A9A98972
-:105D300090C484798B761BDB79ABAC2AC2832CC2EE
-:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
-:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
-:105D6000C94AA929299D0129901F68913270A6036B
-:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
-:105D8000FFB3D230D10F000013DB7503A3018C31B8
-:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
-:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
-:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
-:105DC00028300022300708481209880A28824CDC53
-:105DD000200B80001BDAF90C4A11ABAA29A2840916
-:105DE000290B29A684D10F006C1004C04118DAF2E7
-:105DF00017DAF40C2611A727277038A866256286C3
-:105E0000007104A35500441A75414822628415DBD1
-:105E10001502320BC922882117DAF10884140744CD
-:105E200001754905C834C020D10FD10F0809471D9D
-:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
-:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
-:105E50006684D10FC020D10F6C1004DB30C0D01885
-:105E6000DAD5DA2025300022300708580A28824C7B
-:105E7000DC200B80008931709E121BDACF0C4A1196
-:105E8000ABAA29A28409290B29A684D10F09C952DA
-:105E900068532600910418DACAC0A12F811600AAFF
-:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
-:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
-:105EC0009A0A0A472EF11600A10400881A08EE0269
-:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
-:105EE0000B2BC684D10F00006C1004DB30C0D0191E
-:105EF000DAB1DA2028300022300709880A28824CDB
-:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
-:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
-:105F200016DAA80C2711A626266038A87225228624
-:105F3000006104A35500441A7541082222840232EC
-:105F40000BD10F00C020D10F6C100415DB050249E6
-:105F5000142956112452120208430F8811C07300ED
-:105F6000810400361A008104C78F00771A0877036E
-:105F7000074401064402245612D10F006C10066E2D
-:105F800023026000AC6420A7C0A0851013DADD16E0
-:105F9000DAF4C040A6AA2BA2AE0B19416490666841
-:105FA000915D68925268933C2AA2AA283C7F288C73
-:105FB0007F0A0A4D2980012880002AACF208881146
-:105FC0000988027589462B3D0129B0002BB00108D4
-:105FD00099110B99027A9934B8332A2A00B1447284
-:105FE00049B160004A7FBF0715DADF63FFB90000DF
-:105FF000253AE863FFB10000253AE863FFA90000F5
-:10600000250A6463FFA1C05A63FF9C0000705F080B
-:106010002534FF058C142C34FE70AF0B0A8D142E22
-:106020003D012AE4012DE400DA405BFD5063FFA747
-:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
-:106040001BDACBC080C07160000D00000022A438B4
-:10605000B1AA299C107B915F26928679C2156E6247
-:1060600062C0206D080AB12200210400741A764B28
-:10607000DB63FFEE2292850D6311032514645FCF6D
-:10608000D650032D436DD9039820B4220644146DD5
-:106090004922982098219822982398249825982678
-:1060A000982798289829982A982B982C982D982EDC
-:1060B000982F222C4063FF971EDA4027E68027E6C0
-:1060C00081D10F00C02063FF830000006C1004C06A
-:1060D00062C04112DA3B1ADA3713DA522AA00023DF
-:1060E000322D19DA9F2BACFE2992AE6EA30260000E
-:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
-:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
-:10611000015805922B211A0ABB28D3A09B50580581
-:10612000A92B52000ABB082A0A005805A815DA91C3
-:106130002D21022C3AE80C3C2804DD022D25029C7E
-:10614000505805A08B50AABBC0A15805A01CDA8AE4
-:106150002D21020C3C2806DD0213DA882D25029C35
-:10616000305805988B30AABBC0A25805982A210246
-:10617000C0B40BAA020A0A4F2A25025805ACD10F57
-:10618000242423C3CC2C251A63FF760018DA801C44
-:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
-:1061A0001FDA7C2D203624F47A24F47E24F4820E27
-:1061B000DD0124F4862E0AF707552806DD02C07596
-:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
-:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
-:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
-:1061F0002E0A800D0D4627546C24546B0EDD022DA3
-:10620000243663FEFC0000006C10042A0A302B0ABE
-:10621000035BFF4D12DA53C390292616C3A1C0B306
-:10622000C08A2826175BFF48C03CC3B12B26161A2C
-:10623000D9E42AA02023261764A079C3A2C0B15BA9
-:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
-:10625000C0B12326175BFF3CC28F282616C0FE2F35
-:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
-:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
-:106280002926175BFF31C3C62C2616C1B32A0AA2E2
-:106290002B2617C0B35BFF2C290AA2292616C1851D
-:1062A000282617C2FB2F2616C0E72E26171DDA391F
-:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
-:1062C0006C10041CDA031BD9ED18DA3317DA341614
-:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
-:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
-:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
-:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
-:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
-:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
-:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
-:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
-:106350008A2E369F2C369E2C369DB1ACC07919D929
-:10636000D81BDA1413DA121ADA1218DA1314D9D97C
-:1063700016DA1304F42812DA1204660C040506A2D5
-:1063800052A858AA5AA3539B3029A50027848AC033
-:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
-:1063A0005726361D26361E2E361F16DA0813DA0833
-:1063B000A65504330C2826C82E75002D54AC2E5437
-:1063C000AB2E54AA2326E62326E52E26E7D10F007E
-:1063D0006C100613D99417D9E224723D2232937FB0
-:1063E0002F0B6D08052832937F8F0263FFF3C0C423
-:1063F000C0B01AD973C051D94004593929A4206EAC
-:1064000044020BB502C3281ED96EDDB025E4220577
-:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
-:1064200016D9E11DD9ED94102A724517D9AB6DA983
-:106430004BD450B3557A5B17DF50756B071FD9608B
-:106440008FF00F5F0C12D9A302F228AE2222D68160
-:10645000D54013D9A0746B0715D95A855005450C42
-:10646000035328B145A73FA832A93322369D2236CF
-:106470009E2436802B369F2BF48B2CF48C14D969F8
-:1064800024424DC030041414C84C6D0806B13304C6
-:106490001414C84263FFF20015D947C44000310408
-:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
-:1064B00018D95D2B824D29824E29A5202882537A36
-:1064C000871E2C54008E106FE45D12D93D2F2121C0
-:1064D0002321202F251F04330C23252023251ED103
-:1064E0000FC06218D99F88807E87D98910265400F2
-:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
-:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
-:106510002AB1200A1A1403AA0C2AB5202AB5212A66
-:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
-:10653000202BC51F03DD0C2DC5202DC51ED10F003E
-:106540006C100619D91F14D98612D93615D9A3C7CC
-:106550003FC0E02E56A82E56A92E56AA2E56AB2383
-:10656000262918D946DB101CD99DC0D42A42452DB6
-:1065700016012C160000B0890A880C98905BFF94D5
-:106580002C22E318D90F0C5C149C842B22E48C84FD
-:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
-:1065A0002A86062922CD0959142986072F22892FE8
-:1065B00086095BFF435BFF1423463BC1B01ED90035
-:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
-:1065D000E5025804965BFEBD5BFE96C050C0B01647
-:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
-:1065F0002DC0306000440000007F9F0FB155091985
-:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
-:106610005008580A28822C2B0A000B8000005104D5
-:10662000D2A0C091C7AF00991A0A99039912CE3827
-:1066300064206BD3202B20072516032C12022A621C
-:10664000827CA86318D8DC01110208580A28822C21
-:10665000DA500B8000D2A0643FD58A310A8A140434
-:10666000AA01C82A2B22010B8B1404BB017BA9456C
-:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
-:1066800019D8CD1AD91488130ADA28DC801DD951FB
-:1066900009880A28823C0DAA080B8000652F93D335
-:1066A00020C0B063FF9400007FAF34B155005004A8
-:1066B0000A091963FF42DAB07B7B081AD8C12AA203
-:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
-:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
-:1066E000645F60C050C0B0C7CE9C1263FF5500000D
-:1066F0006C100427221EC08008E4311BD8AF0002B2
-:10670000002AB28219D8AF003104C06100661A298C
-:1067100091020A6A022AB68209E43115D90C0C38B2
-:1067200011A8532832822432842A8CFC7841102903
-:1067300021022A368297A0096902292502D10F0079
-:106740002B21022C32850B6B022CCCFC2C36829731
-:10675000C02B2502D10F00006C1004C0E71DD89299
-:106760001CD8940D4911D7208B228A200B4B0BD2B9
-:10677000A007A80C9B72288CF4C8346F8E026000AE
-:10678000A31FD88AA298AF7B78B334C93DC081C01B
-:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
-:1067A0000500308800508C887008980878B16DD248
-:1067B000A09870D10FC0F0038F387FE0DE63FFD860
-:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
-:1067D0005002F5380505426450792CD67E0B3612EE
-:1067E0002F6C100F4F366DFA0500808800208C0644
-:1067F000440CC081C05003B208237C0C03853805CB
-:10680000054264505A2CD67ED30F6D4A050020886D
-:1068100000308CD2A0A798BC889870D10FD2A0BCB1
-:10682000799970D10FD2302BAD08C0F1C0500BF563
-:1068300038050542CB542CD67E083F14260A100F8B
-:10684000660C0646366D6A0500208800B08C8270A2
-:1068500063FF2D00C05003F53875E08063FF7A00B8
-:10686000C06002863876E09F63FF9900C05003F550
-:106870003875E0C463FFBE006C1004D62068520F68
-:10688000695324DA20DB30DC405800F7D2A0D10F66
-:10689000DA20DB30DC405800F49A2424240EC02196
-:1068A00022640FC020D10F00B83BB04C2A2C748951
-:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
-:1068C0002D240E2890072D9003A488B088B1DD2DCB
-:1068D00094032894075BFFA069511DC0E082242A1D
-:1068E000600F18D8BF2A240329600E8F202924079F
-:1068F00008FF029F209E64D10FC020D10F0000002E
-:106900006C1004942319D8B7C0B3083A110BAA022B
-:10691000992019D8299A2116D827C05028929D2548
-:1069200064A2288C1828969DD10F00006C100428B2
-:106930002066C038232406B788282466D10F0000BB
-:106940006C10060D3C111AD819D820035B0C862256
-:106950000D55118221AA8902320B928105630C9395
-:10696000820C550C792B54CB531CD8111DD80FC059
-:10697000F7A256C031C0A0043A380A0A42769343BF
-:10698000044302C9AB2CD67ED30F6DBA0500208814
-:1069900000308C8281A25272917D92818382C83EA6
-:1069A000D10FC071C06002763876F0DB63FFD5008E
-:1069B000C020BC89998199809282D10F222DF892B2
-:1069C0008163FFA219D7FA02860CA9669611D940F5
-:1069D000063612961006BB0C64A0442CD67E8A1094
-:1069E000D30F6DAA0500208800908CBC828311C053
-:1069F000E0A433240A01034E380E0E42CAEC2CD612
-:106A00007E6DBA0500208800308C821102520CA2E3
-:106A100082BC22928163FF83BC82928163FF7C00EF
-:106A2000C06002363876F0B563FFAF00C070024731
-:106A30003877F0CC63FFC6006C100414D7EBC1525A
-:106A4000A424CA3128221D73811C292102659016B5
-:106A50002A300075A912022A02033B022C3007C01B
-:106A6000D25801D5653FDCD10F2B300703BB0B0B90
-:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
-:106A80006C1004292006C0706E9741292102C08F26
-:106A90002A2014C0B62B240606AA022A24147980C0
-:106AA000022725022A221E2C221D7AC10EC8ABDA2B
-:106AB00020DB302C0A00033D025BF7F96450892D7E
-:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
-:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
-:106AE000D7B529A29EC08A798B712BE22668B004A3
-:106AF0008C207BC96629A29D1FD7B264905D9790B8
-:106B0000C0C31DD7C62B21049D9608BB110CBB0228
-:106B10009B919B971CD7C3C08527E4A22BA29D28DD
-:106B200024068DFA282102B0DD2BBC302BA69D9DBA
-:106B3000FA0C8802282502C8D2C020D10F8EF91283
-:106B4000D7B92E2689C020D10F283000688938DABD
-:106B500020DB30DC4058004463FF6300022A022B34
-:106B60000A065800D3220A00D10F655010293000C0
-:106B7000689924022A02033B02DC4058003BC020F3
-:106B8000D10FD270D10F00002A2C74033B02044CA9
-:106B9000025BFEF163FF2700DB30DC402A2C745BD4
-:106BA000FEEEC020D10F00006C1004C83F8926887B
-:106BB00029A399992609880C080848282525CC522C
-:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
-:106BD0006C1004D820D73082220D451105220C926A
-:106BE0008264207407420B13D771D420A3837323CC
-:106BF00002242DF8858074514CBC82C0906D08161B
-:106C000000408800708C773903D720C0918680744B
-:106C10003901D42074610263FFE2CA98C097C04171
-:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
-:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
-:106C400000208800308C9780D270D10FBC8FC0E0BC
-:106C50000F4E387E90E263FFD6BC8292819280C054
-:106C6000209282D10F0000006C1006C0D71CD74EB6
-:106C70001BD7500D4911D7202E221F28221D0E4E42
-:106C80000BD280078A0C2E761F2AAC80C8346FAED8
-:106C9000026000CB2F0A801AD754A29EAA7A7EA344
-:106CA0003FC93FC0E1C05002E538050542CA552B37
-:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
-:106CC000721DAE9E0EA50C645086D2802E761DC01D
-:106CD00091298403D10FC05003E53875D0D363FFE9
-:106CE000CD15D741027E0CA5EE643051C0A1250A16
-:106CF0000002A538033A020505426450922BC67E75
-:106D00000E35129510255C10054536D30F6D5A05CA
-:106D100000A08800208CC0A1A3E2C05023FA800309
-:106D2000730C03A538AF730505426450722BC67E01
-:106D3000851005450C6D5A0500208800308CD280E6
-:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
-:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
-:106D6000D2302E8D08C0F1C0500EF538050542CB4B
-:106D7000592BC67E0A3F14C1600F660C064636D3F7
-:106D80000F6D6A0500208800E08C22721D63FF03EE
-:106D9000C061C05003653875D80263FF6263FF5C51
-:106DA000C05002A53875D08763FF8100C06003F62C
-:106DB0003876D0BF63FFB9006C10042A2015292053
-:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
-:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
-:106DE0000A472A2415CAAF8B438942B0A8009104F0
-:106DF00000881AA8FF0FBB029B278F260FB80C78BC
-:106E00003B1AC020D10F0000292102C0A20A99021A
-:106E1000292502C021D10F008B2763FFDC2BD12055
-:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
-:106E30008B438C288F42B0AD00F10400DD1AADCC3D
-:106E40000CBB029B27DA20B7EB580019C021D10FE9
-:106E50009F2763FFEF0000006C100428203C643083
-:106E60004705306000073E01053EB156076539050C
-:106E70004928C77FA933030641076603B1660606A2
-:106E800041A6337E871E222125291AFC732B150269
-:106E9000380C09816000063E01023EB124064239E9
-:106EA00003220AD10FD230D10FC05163FFC00000BE
-:106EB0006C100427221EC08008E4311DD6BF0002DA
-:106EC000002CD2821BD6BF003104C06100661A2B91
-:106ED000B1020C6C022CD6820BE43119D7440C3A67
-:106EE00011AA932832829780253282243284B455A5
-:106EF00025368275410A292102096902292502D114
-:106F00000F2A21022B32830A6A022B36822A25029B
-:106F1000D10F00006C100418D6A80C2711087708B0
-:106F2000267286253C04765B1315D6A405220A2218
-:106F300022A3682002742904227285D10FC020D1B7
-:106F40000F0000006C100419D6A727221EC080096C
-:106F5000770208E4311DD6980002002CD2821BD69D
-:106F600098003104C06100661A2BB1020C6C022C2F
-:106F7000D6820BE43119D71D0C3A11AA932832821C
-:106F80009780253282243284B45525368275410B90
-:106F90002A21020A6A022A2502D10F002B21022C83
-:106FA00032830B6B022C36822B2502D10F0000009E
-:106FB0006C10041BD6810C2A11ABAA29A286B43806
-:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
-:106FD000290868B00274B90D299D0129901F6E928D
-:106FE0000822A285D10FC020D10FC892C020D10F96
-:106FF000DA205BEE88C020D10F0000006C10041472
-:10700000D66E28429E19D66B6F88026000BA29920C
-:10701000266890078A2009AA0C65A0AC2A429DC068
-:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
-:10703000C28609B90A7ED30260009A2992A3689099
-:10704000078D2009DD0C65D08C25C2856450862D06
-:107050002104C0306ED80D2C2066B8CC0C0C472C07
-:10706000246665C07B1CD6E218D66B1AD66219D688
-:10707000731DD667C0E49E519D508F209357935542
-:1070800099539A569A5408FF021AD6839F5288261B
-:107090009F5A9E599D58935E9C5D935C9A5B08082D
-:1070A00048058811985FC0D81FD64C0CB911A49917
-:1070B000289285AFBF23F4CF288C402896858E2652
-:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
-:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
-:1070E0000FDBD05BFE072324662B200C63FF7500AB
-:1070F000C72FD10FC72FD10F6C1004C85B292006F2
-:1071000068941C689607C020D10FC020D10FDA20E8
-:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
-:107120002E200C18D6250CEF11A8FF29F286C08856
-:10713000798B791AD6220AEA0A2AA2A368A0048BBC
-:10714000207AB96823F2856430621BD62C290A8024
-:107150002C20682820672D21040B881104DD1108DC
-:10716000DD020DCC02C0842D4A100DCC021DD624A8
-:1071700098319D308A2B99379C340BAA02C0C09C51
-:10718000359C369A322A2C74DB4028F285C0D328ED
-:107190008C2028F6852C25042D24061FD60FDD40D3
-:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
-:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
-:1071C00024160128ACF86583862B2122C0F22A21DF
-:1071D00024CC572AAC010A0A4F2A25247ABB026024
-:1071E000037F2C21020C0C4C65C3192E22158D3205
-:1071F000C0910EDD0C65D39088381ED5EF64836B8B
-:107200008C37C0B8C0960CB9399914B49A9A120D3B
-:10721000991199138F6718D5EAC9FB2880217F83BC
-:10722000168B142C22002A200C5BFF61D4A064A3CF
-:10723000B38F6760002800002B200C89120CBA1154
-:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
-:10725000A368D00488207D893024A28564436427F4
-:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
-:1072700070C1C42D211F5BFEF889268827DDA00977
-:10728000880C7A8B179A10600006C04063FFCC0010
-:1072900000DA208B105BFEC88D1065A267C0E09EEF
-:1072A000488C649C498B658A669B4A9A4B97458FAC
-:1072B000677F7302600120CD529D10DA20DB302CF5
-:1072C00012015BFE698D10C051D6A08FA7C0C08A85
-:1072D00068974D9A4C8869896A984E994F8E6A8A48
-:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
-:1072F0000B8E1477B701C0A1C091C08493159D1760
-:107300009516C0D025203CC030085801089338C0DD
-:1073100082083310085B010535400B9D3807DD10EE
-:107320000BAB100E19402A211F07991003DD020D27
-:10733000BB020553100933020A55112921250A2AD7
-:10734000140929140499110A99020933028A2B2974
-:1073500021040BAA021BD6270899110955020855CA
-:10736000020BAA029A408920881408991109880200
-:1073700019D5A61DD62109880298418B2A934695D6
-:107380004783150DBB0285168D179B448A65896658
-:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
-:1073A00088268E29AD87972607EE0C0E0E482E25CF
-:1073B000259B672B200C87131ED5800CB911AE9925
-:1073C000289285A78828968517D584C090A7BB29C1
-:1073D000B4CF871863FE3C008C60C0E0C091C0F061
-:1073E000C034C0B82A210428203C08AA110B8B0104
-:1073F000038301039F380B9B39C03208FF100388B9
-:1074000001089E380C881407EE100FEE0203880165
-:1074100008983905BF1029211F0ABB1107881008D9
-:10742000FF020BAA0218D57809291403AA022B21FE
-:107430002583200B2B1404BB110833110FBB020B47
-:1074400099028B148F2A0B33020833028B2B647042
-:10745000868868974D984C8769886A9341994697C2
-:107460004E984FC07077C701C0719A4718D5E30B8B
-:107470007C100CEC0208F802984418D5E00CBC0211
-:1074800008CC029C402A200C295CFEC0801FD54AF3
-:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
-:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
-:1074B0002524B1AA2A26156490DBC9D28F262E2254
-:1074C000090DFF082F26060FEE0C0E0E482E25255F
-:1074D0006550E4C020D10F00C07093419F4499468D
-:1074E0009A4777C70A1CD5362CC022C0810C873832
-:1074F0001CD5C40B781008E80208B8020C88029862
-:107500004063FF8000CC57DA20DB608C115BFDD636
-:10751000292102689806689403C020D10F2B221EEF
-:10752000C0A029221D2A25027B9901C0B064BFE8B2
-:1075300013D5212CB00728B000DA2003880A28824E
-:107540004CC0D10B8000DBA065AFE763FFCA000031
-:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
-:10756000D10FC16DC19D29252C60000429252CD681
-:10757000902624672F2468DA20DB308C11DD502E12
-:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
-:1075900063FFDD000000C8DF8C268B29ADCC9C2664
-:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
-:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
-:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
-:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
-:1075E000252463FF1FDA202B200C5BFE5663FF145B
-:1075F00012D5858220028257C82163FFFC12D581F3
-:1076000003E83004EE3005B13093209421952263D5
-:10761000FFFC000010D57D910092019302940311AC
-:10762000D554821001EA30A21101F031C04004E4C7
-:107630001600020011D5768210234A00032202921E
-:107640001011D540C021921004E4318403830282DA
-:1076500001810000D23001230000000010D56D919F
-:107660000092019302940311D543821001EA30A2E3
-:107670001101F131C04004E41600020011D564820A
-:107680001013D4E7032202921004E431840383022E
-:107690008201810000D330013300000010D55E91DB
-:1076A00000810165104981026510448103CF1F925A
-:1076B000019302940311D531821001EA30A2110125
-:1076C000F231C04004E41600020011D550821013BC
-:1076D000D4CF032202921004E43184038302820196
-:1076E000C010910391029101810000D43001430048
-:1076F00012D500C03028374028374428374828376B
-:107700004C233D017233ED03020063FFFC000000D7
-:1077100010D542910092019302940311D54082103A
-:10772000921011D4F28310032202921011D53D124F
-:10773000D5049210C04004E41600020011D5348232
-:107740001013D4EB032202921004E4318403830269
-:107750008201810000D53001530000006C10026EE0
-:10776000322FD620056F04043F04745B2A05440CB5
-:1077700000410400331A220A006D490D73630403AB
-:10778000660CB1220F2211031314736302222C0121
-:10779000D10FC83BD10F000073630CC021D10F0083
-:1077A0000000000044495630C020D10F6C10020088
-:1077B00040046B4C07032318020219D10F0203196E
-:1077C000C020D10F6C100202EA30D10F6C1002CC35
-:1077D0002503F03160000F006F220503F1316000D6
-:1077E000056F230503F231000200D10F6C1002CCAB
-:1077F0002502F030D10F00006F220402F130D10FCA
-:107800006F230402F230D10FC020D10F6C1002227E
-:107810000A20230A006D280E2837402837442837CD
-:107820004828374C233D01030200D10F6C1002029F
-:10783000E431D10F0A0000004368656C73696F2062
-:1078400046572044454255473D3020284275696CD3
-:1078500074204D6F6E204D61722020382031373AF0
-:1078600032383A3135205053542032303130206F85
-:107870006E20636C656F70617472612E61736963F1
-:1078800064657369676E6572732E636F6D3A2F68F6
-:107890006F6D652F66656C69782F772F66775F3718
-:1078A0002E392D6977617270292C205665727369A3
-:1078B0006F6E2054337878203030372E30612E3080
-:1078C00030202D20313030373061303010070A0041
-:0478D0000BDFE8756D
-:00000001FF
index ef17e0169da187ed209164a7881befcf00eec97f..60a327863b1122e246b79bf91ecdf23136eccac9 100644 (file)
@@ -14,7 +14,7 @@
 #include "dlm_internal.h"
 
 static uint32_t dlm_nl_seqnum;
-static uint32_t listener_nlpid;
+static uint32_t listener_nlportid;
 
 static struct genl_family family = {
        .id             = GENL_ID_GENERATE,
@@ -64,13 +64,13 @@ static int send_data(struct sk_buff *skb)
                return rv;
        }
 
-       return genlmsg_unicast(&init_net, skb, listener_nlpid);
+       return genlmsg_unicast(&init_net, skb, listener_nlportid);
 }
 
 static int user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
-       listener_nlpid = info->snd_pid;
-       printk("user_cmd nlpid %u\n", listener_nlpid);
+       listener_nlportid = info->snd_portid;
+       printk("user_cmd nlpid %u\n", listener_nlportid);
        return 0;
 }
 
index c57e064666e4c5b6ead64191063b0ab36dc922a9..7f1c0f00db9bc337cd32b5083c4ff8ef1343c3ca 100644 (file)
@@ -192,6 +192,7 @@ header-y += in_route.h
 header-y += sock_diag.h
 header-y += inet_diag.h
 header-y += unix_diag.h
+header-y += packet_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
@@ -359,6 +360,7 @@ header-y += sysctl.h
 header-y += sysinfo.h
 header-y += taskstats.h
 header-y += tcp.h
+header-y += tcp_metrics.h
 header-y += telephony.h
 header-y += termios.h
 header-y += time.h
index d323a4b4143c6e2d1f43b4a82e459fad157d5f04..6ba45d2b99db2e153be43225addaabdb39060072 100644 (file)
 #define  BCMA_CC_CHIPST_4706_SFLASH_TYPE       BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
 #define  BCMA_CC_CHIPST_4706_MIPS_BENDIAN      BIT(3) /* 0: little, 1: big endian */
 #define  BCMA_CC_CHIPST_4706_PCIE1_DISABLE     BIT(5) /* PCIE1 enable strap pin */
+#define  BCMA_CC_CHIPST_5357_NAND_BOOT         BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
 #define BCMA_CC_JCMD                   0x0030          /* Rev >= 10 only */
 #define  BCMA_CC_JCMD_START            0x80000000
 #define  BCMA_CC_JCMD_BUSY             0x80000000
 #define  BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
 #define  BCMA_CC_SROM_CONTROL_SIZE_SHIFT       1
 #define  BCMA_CC_SROM_CONTROL_PRESENT  0x00000001
+/* Block 0x140 - 0x190 registers are chipset specific */
+#define BCMA_CC_4706_FLASHSCFG         0x18C           /* Flash struct configuration */
+#define  BCMA_CC_4706_FLASHSCFG_MASK   0x000000ff
+#define  BCMA_CC_4706_FLASHSCFG_SF1    0x00000001      /* 2nd serial flash present */
+#define  BCMA_CC_4706_FLASHSCFG_PF1    0x00000002      /* 2nd parallel flash present */
+#define  BCMA_CC_4706_FLASHSCFG_SF1_TYPE       0x00000004      /* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define  BCMA_CC_4706_FLASHSCFG_NF1    0x00000008      /* 2nd NAND flash present */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK     0x000000f0
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB      0x00000010      /* 4MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB      0x00000020      /* 8MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB     0x00000030      /* 16MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB     0x00000040      /* 32MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB     0x00000050      /* 64MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB    0x00000060      /* 128MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB    0x00000070      /* 256MB */
+/* NAND flash registers for BCM4706 (corerev = 31) */
+#define BCMA_CC_NFLASH_CTL             0x01A0
+#define  BCMA_CC_NFLASH_CTL_ERR                0x08000000
+#define BCMA_CC_NFLASH_CONF            0x01A4
+#define BCMA_CC_NFLASH_COL_ADDR                0x01A8
+#define BCMA_CC_NFLASH_ROW_ADDR                0x01AC
+#define BCMA_CC_NFLASH_DATA            0x01B0
+#define BCMA_CC_NFLASH_WAITCNT0                0x01B4
 /* 0x1E0 is defined as shared BCMA_CLKCTLST */
 #define BCMA_CC_HW_WORKAROUND          0x01E4 /* Hardware workaround (rev >= 20) */
 #define BCMA_CC_UART0_DATA             0x0300
 #define BCMA_CC_PLLCTL_ADDR            0x0660
 #define BCMA_CC_PLLCTL_DATA            0x0664
 #define BCMA_CC_SPROM                  0x0800 /* SPROM beginning */
+/* NAND flash MLC controller registers (corerev >= 38) */
+#define BCMA_CC_NAND_REVISION          0x0C00
+#define BCMA_CC_NAND_CMD_START         0x0C04
+#define BCMA_CC_NAND_CMD_ADDR_X                0x0C08
+#define BCMA_CC_NAND_CMD_ADDR          0x0C0C
+#define BCMA_CC_NAND_CMD_END_ADDR      0x0C10
+#define BCMA_CC_NAND_CS_NAND_SELECT    0x0C14
+#define BCMA_CC_NAND_CS_NAND_XOR       0x0C18
+#define BCMA_CC_NAND_SPARE_RD0         0x0C20
+#define BCMA_CC_NAND_SPARE_RD4         0x0C24
+#define BCMA_CC_NAND_SPARE_RD8         0x0C28
+#define BCMA_CC_NAND_SPARE_RD12                0x0C2C
+#define BCMA_CC_NAND_SPARE_WR0         0x0C30
+#define BCMA_CC_NAND_SPARE_WR4         0x0C34
+#define BCMA_CC_NAND_SPARE_WR8         0x0C38
+#define BCMA_CC_NAND_SPARE_WR12                0x0C3C
+#define BCMA_CC_NAND_ACC_CONTROL       0x0C40
+#define BCMA_CC_NAND_CONFIG            0x0C48
+#define BCMA_CC_NAND_TIMING_1          0x0C50
+#define BCMA_CC_NAND_TIMING_2          0x0C54
+#define BCMA_CC_NAND_SEMAPHORE         0x0C58
+#define BCMA_CC_NAND_DEVID             0x0C60
+#define BCMA_CC_NAND_DEVID_X           0x0C64
+#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68
+#define BCMA_CC_NAND_INTFC_STATUS      0x0C6C
+#define BCMA_CC_NAND_ECC_CORR_ADDR_X   0x0C70
+#define BCMA_CC_NAND_ECC_CORR_ADDR     0x0C74
+#define BCMA_CC_NAND_ECC_UNC_ADDR_X    0x0C78
+#define BCMA_CC_NAND_ECC_UNC_ADDR      0x0C7C
+#define BCMA_CC_NAND_READ_ERROR_COUNT  0x0C80
+#define BCMA_CC_NAND_CORR_STAT_THRESHOLD       0x0C84
+#define BCMA_CC_NAND_READ_ADDR_X       0x0C90
+#define BCMA_CC_NAND_READ_ADDR         0x0C94
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X       0x0C98
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C
+#define BCMA_CC_NAND_COPY_BACK_ADDR_X  0x0CA0
+#define BCMA_CC_NAND_COPY_BACK_ADDR    0x0CA4
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X        0x0CA8
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR  0x0CAC
+#define BCMA_CC_NAND_INV_READ_ADDR_X   0x0CB0
+#define BCMA_CC_NAND_INV_READ_ADDR     0x0CB4
+#define BCMA_CC_NAND_BLK_WR_PROTECT    0x0CC0
+#define BCMA_CC_NAND_ACC_CONTROL_CS1   0x0CD0
+#define BCMA_CC_NAND_CONFIG_CS1                0x0CD4
+#define BCMA_CC_NAND_TIMING_1_CS1      0x0CD8
+#define BCMA_CC_NAND_TIMING_2_CS1      0x0CDC
+#define BCMA_CC_NAND_SPARE_RD16                0x0D30
+#define BCMA_CC_NAND_SPARE_RD20                0x0D34
+#define BCMA_CC_NAND_SPARE_RD24                0x0D38
+#define BCMA_CC_NAND_SPARE_RD28                0x0D3C
+#define BCMA_CC_NAND_CACHE_ADDR                0x0D40
+#define BCMA_CC_NAND_CACHE_DATA                0x0D44
+#define BCMA_CC_NAND_CTRL_CONFIG       0x0D48
+#define BCMA_CC_NAND_CTRL_STATUS       0x0D4C
 
 /* Divider allocation in 4716/47162/5356 */
 #define BCMA_CC_PMU5_MAINPLL_CPU       1
 /* 4313 Chip specific ChipControl register bits */
 #define BCMA_CCTRL_4313_12MA_LED_DRIVE         0x00000007      /* 12 mA drive strengh for later 4313 */
 
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA                        BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3          BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH               BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE      BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE   BIT(19)
+
 /* Data for the PMU, if available.
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
  */
@@ -430,6 +515,26 @@ struct bcma_pflash {
        u32 window_size;
 };
 
+#ifdef CONFIG_BCMA_SFLASH
+struct bcma_sflash {
+       bool present;
+       u32 window;
+       u32 blocksize;
+       u16 numblocks;
+       u32 size;
+};
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+struct mtd_info;
+
+struct bcma_nflash {
+       bool present;
+
+       struct mtd_info *mtd;
+};
+#endif
+
 struct bcma_serial_port {
        void *regs;
        unsigned long clockspeed;
@@ -450,6 +555,12 @@ struct bcma_drv_cc {
        struct bcma_chipcommon_pmu pmu;
 #ifdef CONFIG_BCMA_DRIVER_MIPS
        struct bcma_pflash pflash;
+#ifdef CONFIG_BCMA_SFLASH
+       struct bcma_sflash sflash;
+#endif
+#ifdef CONFIG_BCMA_NFLASH
+       struct bcma_nflash nflash;
+#endif
 
        int nr_serial_ports;
        struct bcma_serial_port serial_ports[4];
index 5a71d57196404780ab587458ac1826343fd74c1c..6c9cb93ae3de4bb3210f5037dbd3865fe18288d3 100644 (file)
 #define  BCMA_CLKCTLST_HAVEHTREQ       0x00000010 /* HT available request */
 #define  BCMA_CLKCTLST_HWCROFF         0x00000020 /* Force HW clock request off */
 #define  BCMA_CLKCTLST_EXTRESREQ       0x00000700 /* Mask of external resource requests */
+#define  BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
 #define  BCMA_CLKCTLST_HAVEALP         0x00010000 /* ALP available */
 #define  BCMA_CLKCTLST_HAVEHT          0x00020000 /* HT available */
 #define  BCMA_CLKCTLST_BP_ON_ALP       0x00040000 /* RO: running on ALP clock */
 #define  BCMA_CLKCTLST_BP_ON_HT                0x00080000 /* RO: running on HT clock */
 #define  BCMA_CLKCTLST_EXTRESST                0x07000000 /* Mask of external resource status */
+#define  BCMA_CLKCTLST_EXTRESST_SHIFT  24
 /* Is there any BCM4328 on BCMA bus? */
 #define  BCMA_CLKCTLST_4328A0_HAVEHT   0x00010000 /* 4328a0 has reversed bits */
 #define  BCMA_CLKCTLST_4328A0_HAVEALP  0x00020000 /* 4328a0 has reversed bits */
@@ -83,4 +85,6 @@
                                                         * (2 ZettaBytes), high 32 bits
                                                         */
 
+#define BCMA_SFLASH                    0x1c000000
+
 #endif /* LINUX_BCMA_REGS_H_ */
index d426336d92d9e4bb34f5f049bc7f4f05d49303bb..b006ba0a9f4269e79d8bfae11415e8b3c8785f69 100644 (file)
@@ -150,6 +150,17 @@ static inline void eth_broadcast_addr(u8 *addr)
        memset(addr, 0xff, ETH_ALEN);
 }
 
+/**
+ * eth_zero_addr - Assign zero address
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Assign the zero address to the given address array.
+ */
+static inline void eth_zero_addr(u8 *addr)
+{
+       memset(addr, 0x00, ETH_ALEN);
+}
+
 /**
  * eth_hw_addr_random - Generate software assigned random Ethernet and
  * set device flag
index 21eff418091bb6d943740990cd24a7cf9d39342e..fcb4f8e60c1cbe8aa4b0f190cdd7815d25057f2c 100644 (file)
@@ -45,8 +45,10 @@ struct ethtool_cmd {
                                 * bits) in Mbps. Please use
                                 * ethtool_cmd_speed()/_set() to
                                 * access it */
-       __u8    eth_tp_mdix;
-       __u8    reserved2;
+       __u8    eth_tp_mdix;    /* twisted pair MDI-X status */
+       __u8    eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
+                                  * link should be renegotiated if necessary
+                                  */
        __u32   lp_advertising; /* Features the link partner advertises */
        __u32   reserved[2];
 };
@@ -1229,10 +1231,13 @@ struct ethtool_ops {
 #define AUTONEG_DISABLE                0x00
 #define AUTONEG_ENABLE         0x01
 
-/* Mode MDI or MDI-X */
-#define ETH_TP_MDI_INVALID     0x00
-#define ETH_TP_MDI             0x01
-#define ETH_TP_MDI_X           0x02
+/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then
+ * the driver is required to renegotiate link
+ */
+#define ETH_TP_MDI_INVALID     0x00 /* status: unknown; control: unsupported */
+#define ETH_TP_MDI             0x01 /* status: MDI;     control: force MDI */
+#define ETH_TP_MDI_X           0x02 /* status: MDI-X;   control: force MDI-X */
+#define ETH_TP_MDI_AUTO                0x03 /*                  control: auto-select */
 
 /* Wake-On-Lan options. */
 #define WAKE_PHY               (1 << 0)
index 82b01357af8b0672c330c648f3b01f5aa65134d2..2ded090e10f4e511461cacdbfcdc031dc1dd0b3e 100644 (file)
@@ -74,6 +74,9 @@ struct sock_fprog {   /* Required for SO_ATTACH_FILTER. */
 #define         BPF_LSH         0x60
 #define         BPF_RSH         0x70
 #define         BPF_NEG         0x80
+#define                BPF_MOD         0x90
+#define                BPF_XOR         0xa0
+
 #define         BPF_JA          0x00
 #define         BPF_JEQ         0x10
 #define         BPF_JGT         0x20
@@ -196,10 +199,14 @@ enum {
        BPF_S_ALU_MUL_K,
        BPF_S_ALU_MUL_X,
        BPF_S_ALU_DIV_X,
+       BPF_S_ALU_MOD_K,
+       BPF_S_ALU_MOD_X,
        BPF_S_ALU_AND_K,
        BPF_S_ALU_AND_X,
        BPF_S_ALU_OR_K,
        BPF_S_ALU_OR_X,
+       BPF_S_ALU_XOR_K,
+       BPF_S_ALU_XOR_X,
        BPF_S_ALU_LSH_K,
        BPF_S_ALU_LSH_X,
        BPF_S_ALU_RSH_K,
index b80506bdd733ee181f202ddb6322529d42299d1b..24df9e70406ffb94fb98faf91c174e236155a639 100644 (file)
@@ -67,4 +67,14 @@ static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
 {
        return hash_long((unsigned long)ptr, bits);
 }
+
+static inline u32 hash32_ptr(const void *ptr)
+{
+       unsigned long val = (unsigned long)ptr;
+
+#if BITS_PER_LONG == 64
+       val ^= (val >> 32);
+#endif
+       return (u32)val;
+}
 #endif /* _LINUX_HASH_H */
index e02fc682bb6850600258b622da1a80ef52260dd7..2385119f8bb016c003f5bf2b638b412a6ddde7f9 100644 (file)
@@ -1934,36 +1934,6 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
        return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
 }
 
-/**
- * ieee80211_fhss_chan_to_freq - get channel frequency
- * @channel: the FHSS channel
- *
- * Convert IEEE802.11 FHSS channel to frequency (MHz)
- * Ref IEEE 802.11-2007 section 14.6
- */
-static inline int ieee80211_fhss_chan_to_freq(int channel)
-{
-       if ((channel > 1) && (channel < 96))
-               return channel + 2400;
-       else
-               return -1;
-}
-
-/**
- * ieee80211_freq_to_fhss_chan - get channel
- * @freq: the channels frequency
- *
- * Convert frequency (MHz) to IEEE802.11 FHSS channel
- * Ref IEEE 802.11-2007 section 14.6
- */
-static inline int ieee80211_freq_to_fhss_chan(int freq)
-{
-       if ((freq > 2401) && (freq < 2496))
-               return freq - 2400;
-       else
-               return -1;
-}
-
 /**
  * ieee80211_dsss_chan_to_freq - get channel center frequency
  * @channel: the DSSS channel
@@ -2000,56 +1970,6 @@ static inline int ieee80211_freq_to_dsss_chan(int freq)
                return -1;
 }
 
-/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back
- * Ref IEEE 802.11-2007 section 18.4.6.2
- *
- * The channels and frequencies are the same as those defined for DSSS
- */
-#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan)
-#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq)
-
-/* Convert IEEE802.11 ERP channel to frequency (MHz) and back
- * Ref IEEE 802.11-2007 section 19.4.2
- */
-#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan)
-#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq)
-
-/**
- * ieee80211_ofdm_chan_to_freq - get channel center frequency
- * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
- * @channel: the OFDM channel
- *
- * Convert IEEE802.11 OFDM channel to center frequency (MHz)
- * Ref IEEE 802.11-2007 section 17.3.8.3.2
- */
-static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel)
-{
-       if ((channel > 0) && (channel <= 200) &&
-           (s_freq >= 4000))
-               return s_freq + (channel * 5);
-       else
-               return -1;
-}
-
-/**
- * ieee80211_freq_to_ofdm_channel - get channel
- * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
- * @freq: the frequency
- *
- * Convert frequency (MHz) to IEEE802.11 OFDM channel
- * Ref IEEE 802.11-2007 section 17.3.8.3.2
- *
- * This routine selects the channel with the closest center frequency.
- */
-static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq)
-{
-       if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) &&
-           (s_freq >= 4000))
-               return (freq + 2 - s_freq) / 5;
-       else
-               return -1;
-}
-
 /**
  * ieee80211_tu_to_usec - convert time units (TU) to microseconds
  * @tu: the TUs
index f0e69c6e82083c33eb799cbba0eb2ba789699a56..9adcc29f084af485a8b674dd6c6b3b51532ce68b 100644 (file)
@@ -92,6 +92,7 @@
 #define ARPHRD_PHONET  820             /* PhoNet media type            */
 #define ARPHRD_PHONET_PIPE 821         /* PhoNet pipe header           */
 #define ARPHRD_CAIF    822             /* CAIF media type              */
+#define ARPHRD_IP6GRE  823             /* GRE over IPv6                */
 
 #define ARPHRD_VOID      0xFFFF        /* Void type, nothing is known */
 #define ARPHRD_NONE      0xFFFE        /* zero header length */
index ac173bd2ab65e40c8759eb181460b79dd588b5c6..e4dad4ddf0855b43ed30747ed565203d273acf2d 100644 (file)
@@ -272,6 +272,22 @@ enum macvlan_mode {
 
 #define MACVLAN_FLAG_NOPROMISC 1
 
+/* VXLAN section */
+enum {
+       IFLA_VXLAN_UNSPEC,
+       IFLA_VXLAN_ID,
+       IFLA_VXLAN_GROUP,
+       IFLA_VXLAN_LINK,
+       IFLA_VXLAN_LOCAL,
+       IFLA_VXLAN_TTL,
+       IFLA_VXLAN_TOS,
+       IFLA_VXLAN_LEARNING,
+       IFLA_VXLAN_AGEING,
+       IFLA_VXLAN_LIMIT,
+       __IFLA_VXLAN_MAX
+};
+#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
+
 /* SR-IOV virtual function management section */
 
 enum {
@@ -398,4 +414,22 @@ struct ifla_port_vsi {
        __u8 pad[3];
 };
 
+
+/* IPoIB section */
+
+enum {
+       IFLA_IPOIB_UNSPEC,
+       IFLA_IPOIB_PKEY,
+       IFLA_IPOIB_MODE,
+       IFLA_IPOIB_UMCAST,
+       __IFLA_IPOIB_MAX
+};
+
+enum {
+       IPOIB_MODE_DATAGRAM  = 0, /* using unreliable datagram QPs */
+       IPOIB_MODE_CONNECTED = 1, /* using connected QPs */
+};
+
+#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
+
 #endif /* _LINUX_IF_LINK_H */
index aa2e167e1ef434a696a55322bd941bdb3983bf50..6d88a7f576808cbd743ca1c4ad42d2cb5b6ba8cd 100644 (file)
@@ -67,6 +67,9 @@ struct team_port {
        struct netpoll *np;
 #endif
 
+       s32 priority; /* lower number ~ higher priority */
+       u16 queue_id;
+       struct list_head qom_list; /* node in queue override mapping list */
        long mode_priv[0];
 };
 
@@ -105,7 +108,7 @@ struct team_mode_ops {
        bool (*transmit)(struct team *team, struct sk_buff *skb);
        int (*port_enter)(struct team *team, struct team_port *port);
        void (*port_leave)(struct team *team, struct team_port *port);
-       void (*port_change_mac)(struct team *team, struct team_port *port);
+       void (*port_change_dev_addr)(struct team *team, struct team_port *port);
        void (*port_enabled)(struct team *team, struct team_port *port);
        void (*port_disabled)(struct team *team, struct team_port *port);
 };
@@ -115,6 +118,7 @@ enum team_option_type {
        TEAM_OPTION_TYPE_STRING,
        TEAM_OPTION_TYPE_BINARY,
        TEAM_OPTION_TYPE_BOOL,
+       TEAM_OPTION_TYPE_S32,
 };
 
 struct team_option_inst_info {
@@ -131,6 +135,7 @@ struct team_gsetter_ctx {
                        u32 len;
                } bin_val;
                bool bool_val;
+               s32 s32_val;
        } data;
        struct team_option_inst_info *info;
 };
@@ -182,6 +187,8 @@ struct team {
 
        const struct team_mode *mode;
        struct team_mode_ops ops;
+       bool queue_override_enabled;
+       struct list_head *qom_lists; /* array of queue override mapping lists */
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
@@ -231,7 +238,7 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
        return NULL;
 }
 
-extern int team_port_set_team_mac(struct team_port *port);
+extern int team_port_set_team_dev_addr(struct team_port *port);
 extern int team_options_register(struct team *team,
                                 const struct team_option *option,
                                 size_t option_count);
index 5efff60b6f56906112b5c71dffbdf47b2b22cc8d..8c5035ac31421aa1bee89a34c342de34ff63131a 100644 (file)
@@ -75,6 +75,9 @@ enum {
        IFLA_GRE_TTL,
        IFLA_GRE_TOS,
        IFLA_GRE_PMTUDISC,
+       IFLA_GRE_ENCAP_LIMIT,
+       IFLA_GRE_FLOWINFO,
+       IFLA_GRE_FLAGS,
        __IFLA_GRE_MAX,
 };
 
index a810987cb80e47cd2a344dd1e3d289a6d3b46371..e6ff12dd717baf338d2e8ef88cb524c912304ad0 100644 (file)
@@ -74,8 +74,6 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-struct vlan_info;
-
 static inline int is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
@@ -101,6 +99,8 @@ extern int vlan_vids_add_by_dev(struct net_device *dev,
                                const struct net_device *by_dev);
 extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
+
+extern bool vlan_uses_dev(const struct net_device *dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -151,6 +151,11 @@ static inline void vlan_vids_del_by_dev(struct net_device *dev,
                                        const struct net_device *by_dev)
 {
 }
+
+static inline bool vlan_uses_dev(const struct net_device *dev)
+{
+       return false;
+}
 #endif
 
 /**
index 67f9ddacb70c327e6576b47d0c103cddb752dd18..d032780d0ce50849c5441aae24025b2cc2f80cc3 100644 (file)
@@ -104,9 +104,14 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_ANDCONF(in_dev, attr) \
        (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
         IN_DEV_CONF_GET((in_dev), attr))
-#define IN_DEV_ORCONF(in_dev, attr) \
-       (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \
+
+#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
+       (IPV4_DEVCONF_ALL(net, attr) || \
         IN_DEV_CONF_GET((in_dev), attr))
+
+#define IN_DEV_ORCONF(in_dev, attr) \
+       IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
+
 #define IN_DEV_MAXCONF(in_dev, attr) \
        (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
             IN_DEV_CONF_GET((in_dev), attr)))
@@ -133,6 +138,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
                                        IN_DEV_ORCONF((in_dev), \
                                                      PROMOTE_SECONDARIES)
 #define IN_DEV_ROUTE_LOCALNET(in_dev)  IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
+#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \
+       IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
 
 #define IN_DEV_RX_REDIRECTS(in_dev) \
        ((IN_DEV_FORWARD(in_dev) && \
index bf22b03179022da13565642026213601e879fa88..48af63c9a48d25f5f6d9e2681e6eeeec5a4fef8c 100644 (file)
@@ -31,4 +31,21 @@ struct ip6_tnl_parm {
        struct in6_addr raddr;  /* remote tunnel end-point address */
 };
 
+struct ip6_tnl_parm2 {
+       char name[IFNAMSIZ];    /* name of tunnel device */
+       int link;               /* ifindex of underlying L2 interface */
+       __u8 proto;             /* tunnel protocol */
+       __u8 encap_limit;       /* encapsulation limit for tunnel */
+       __u8 hop_limit;         /* hop limit for tunnel */
+       __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
+       __u32 flags;            /* tunnel flags */
+       struct in6_addr laddr;  /* local tunnel end-point address */
+       struct in6_addr raddr;  /* remote tunnel end-point address */
+
+       __be16                  i_flags;
+       __be16                  o_flags;
+       __be32                  i_key;
+       __be32                  o_key;
+};
+
 #endif
index 879db26ec4013297fb76f16143b87ec9db52b5e4..0b94e91ed68529ef74573b4c170eb96f5afc8175 100644 (file)
@@ -256,6 +256,7 @@ struct inet6_skb_parm {
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
        __u16                   dsthao;
 #endif
+       __u16                   frag_max_size;
 
 #define IP6SKB_XFRM_TRANSFORMED        1
 #define IP6SKB_FORWARDED       2
index 82680541576d2f2d307ba9bfb1847681f290f49b..05e3c2c7a8cf81e2184f49a531a8158fa45639b4 100644 (file)
@@ -312,7 +312,13 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
 extern unsigned long timeval_to_jiffies(const struct timeval *value);
 extern void jiffies_to_timeval(const unsigned long jiffies,
                               struct timeval *value);
+
 extern clock_t jiffies_to_clock_t(unsigned long x);
+static inline clock_t jiffies_delta_to_clock_t(long delta)
+{
+       return jiffies_to_clock_t(max(0L, delta));
+}
+
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
index 7cccafe50e7bd6629b10623240fb6b08e79b088c..6c406845f7e29d166667ebc1587fe41eb5cb2448 100644 (file)
@@ -377,5 +377,88 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
 extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
                          struct mii_ioctl_data *mii_data, int cmd);
 
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+       u32 supported = 0;
+
+       if (eee_cap & MDIO_EEE_100TX)
+               supported |= SUPPORTED_100baseT_Full;
+       if (eee_cap & MDIO_EEE_1000T)
+               supported |= SUPPORTED_1000baseT_Full;
+       if (eee_cap & MDIO_EEE_10GT)
+               supported |= SUPPORTED_10000baseT_Full;
+       if (eee_cap & MDIO_EEE_1000KX)
+               supported |= SUPPORTED_1000baseKX_Full;
+       if (eee_cap & MDIO_EEE_10GKX4)
+               supported |= SUPPORTED_10000baseKX4_Full;
+       if (eee_cap & MDIO_EEE_10GKR)
+               supported |= SUPPORTED_10000baseKR_Full;
+
+       return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisment (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+       u32 adv = 0;
+
+       if (eee_adv & MDIO_EEE_100TX)
+               adv |= ADVERTISED_100baseT_Full;
+       if (eee_adv & MDIO_EEE_1000T)
+               adv |= ADVERTISED_1000baseT_Full;
+       if (eee_adv & MDIO_EEE_10GT)
+               adv |= ADVERTISED_10000baseT_Full;
+       if (eee_adv & MDIO_EEE_1000KX)
+               adv |= ADVERTISED_1000baseKX_Full;
+       if (eee_adv & MDIO_EEE_10GKX4)
+               adv |= ADVERTISED_10000baseKX4_Full;
+       if (eee_adv & MDIO_EEE_10GKR)
+               adv |= ADVERTISED_10000baseKR_Full;
+
+       return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+       u16 reg = 0;
+
+       if (adv & ADVERTISED_100baseT_Full)
+               reg |= MDIO_EEE_100TX;
+       if (adv & ADVERTISED_1000baseT_Full)
+               reg |= MDIO_EEE_1000T;
+       if (adv & ADVERTISED_10000baseT_Full)
+               reg |= MDIO_EEE_10GT;
+       if (adv & ADVERTISED_1000baseKX_Full)
+               reg |= MDIO_EEE_1000KX;
+       if (adv & ADVERTISED_10000baseKX4_Full)
+               reg |= MDIO_EEE_10GKX4;
+       if (adv & ADVERTISED_10000baseKR_Full)
+               reg |= MDIO_EEE_10GKR;
+
+       return reg;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_MDIO_H__ */
index 5f49cc0a107e2f75eafbedffd152f03565e16a15..01646aa53b0e6c30be6de8afb406516a2f4b69a5 100644 (file)
@@ -338,18 +338,16 @@ struct napi_struct {
 
        unsigned long           state;
        int                     weight;
+       unsigned int            gro_count;
        int                     (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
        spinlock_t              poll_lock;
        int                     poll_owner;
 #endif
-
-       unsigned int            gro_count;
-
        struct net_device       *dev;
-       struct list_head        dev_list;
        struct sk_buff          *gro_list;
        struct sk_buff          *skb;
+       struct list_head        dev_list;
 };
 
 enum {
@@ -906,11 +904,12 @@ struct netdev_fcoe_hbainfo {
  *     feature set might be less than what was returned by ndo_fix_features()).
  *     Must return >0 or -errno if it changed dev->features itself.
  *
- * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
- *                   unsigned char *addr, u16 flags)
+ * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
+ *                   struct net_device *dev,
+ *                   const unsigned char *addr, u16 flags)
  *     Adds an FDB entry to dev for addr.
  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
- *                   unsigned char *addr)
+ *                   const unsigned char *addr)
  *     Deletes the FDB entry from dev coresponding to addr.
  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
  *                    struct net_device *dev, int idx)
@@ -1016,12 +1015,13 @@ struct net_device_ops {
        void                    (*ndo_neigh_destroy)(struct neighbour *n);
 
        int                     (*ndo_fdb_add)(struct ndmsg *ndm,
+                                              struct nlattr *tb[],
                                               struct net_device *dev,
-                                              unsigned char *addr,
+                                              const unsigned char *addr,
                                               u16 flags);
        int                     (*ndo_fdb_del)(struct ndmsg *ndm,
                                               struct net_device *dev,
-                                              unsigned char *addr);
+                                              const unsigned char *addr);
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
                                                struct netlink_callback *cb,
                                                struct net_device *dev,
@@ -1322,6 +1322,8 @@ struct net_device {
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
 
+       struct lock_class_key *qdisc_tx_busylock;
+
        /* group the device belongs to */
        int group;
 
@@ -1401,6 +1403,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
+extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                          struct sk_buff *skb);
+
 /*
  * Net namespace inlines
  */
@@ -1553,7 +1558,7 @@ struct packet_type {
 #define NETDEV_PRE_TYPE_CHANGE 0x000E
 #define NETDEV_POST_TYPE_CHANGE        0x000F
 #define NETDEV_POST_INIT       0x0010
-#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_UNREGISTER_FINAL 0x0011
 #define NETDEV_RELEASE         0x0012
 #define NETDEV_NOTIFY_PEERS    0x0013
 #define NETDEV_JOIN            0x0014
@@ -2227,6 +2232,7 @@ static inline void dev_hold(struct net_device *dev)
  * kind of lower layer not just hardware media.
  */
 
+extern void linkwatch_init_dev(struct net_device *dev);
 extern void linkwatch_fire_event(struct net_device *dev);
 extern void linkwatch_forget_dev(struct net_device *dev);
 
@@ -2249,8 +2255,6 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
-extern void netif_notify_peers(struct net_device *dev);
-
 /**
  *     netif_dormant_on - mark device as dormant.
  *     @dev: network device
@@ -2560,9 +2564,9 @@ extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
 extern void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
+extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                        unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
+extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                        unsigned char addr_type);
 extern int dev_addr_add_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
@@ -2574,20 +2578,20 @@ extern void dev_addr_flush(struct net_device *dev);
 extern int dev_addr_init(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
 extern int dev_uc_sync(struct net_device *to, struct net_device *from);
 extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
 extern void dev_uc_flush(struct net_device *dev);
 extern void dev_uc_init(struct net_device *dev);
 
 /* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
 extern void dev_mc_flush(struct net_device *dev);
@@ -2599,8 +2603,7 @@ extern void               __dev_set_rx_mode(struct net_device *dev);
 extern int             dev_set_promiscuity(struct net_device *dev, int inc);
 extern int             dev_set_allmulti(struct net_device *dev, int inc);
 extern void            netdev_state_change(struct net_device *dev);
-extern int             netdev_bonding_change(struct net_device *dev,
-                                             unsigned long event);
+extern void            netdev_notify_peers(struct net_device *dev);
 extern void            netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void            dev_load(struct net *net, const char *name);
index c613cf0d7884f8d3f4cf85c188a17c04f8f5e054..1dcf2a38e51f69770395db885ab2aebc39558abe 100644 (file)
@@ -342,7 +342,7 @@ extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
 extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
 
 #include <net/flow.h>
-extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
+extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 
 static inline void
 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
@@ -350,13 +350,11 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 #ifdef CONFIG_NF_NAT_NEEDED
        void (*decodefn)(struct sk_buff *, struct flowi *);
 
-       if (family == AF_INET) {
-               rcu_read_lock();
-               decodefn = rcu_dereference(ip_nat_decode_session);
-               if (decodefn)
-                       decodefn(skb, fl);
-               rcu_read_unlock();
-       }
+       rcu_read_lock();
+       decodefn = rcu_dereference(nf_nat_decode_session_hook);
+       if (decodefn)
+               decodefn(skb, fl);
+       rcu_read_unlock();
 #endif
 }
 
index 2edc64cab7395afa707edc7cca024a1b27b19e09..528697b3c1524d1e7e071986231e2d14979018c2 100644 (file)
@@ -190,6 +190,7 @@ enum ip_set_dim {
         * If changed, new revision of iptables match/target is required.
         */
        IPSET_DIM_MAX = 6,
+       IPSET_BIT_RETURN_NOMATCH = 7,
 };
 
 /* Option flags for kernel operations */
@@ -198,6 +199,7 @@ enum ip_set_kopt {
        IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
        IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
        IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+       IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH),
 };
 
 #ifdef __KERNEL__
@@ -206,9 +208,15 @@ enum ip_set_kopt {
 #include <linux/netlink.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/x_tables.h>
+#include <linux/stringify.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 
+#define _IP_SET_MODULE_DESC(a, b, c)           \
+       MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
+#define IP_SET_MODULE_DESC(a, b, c)            \
+       _IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))
+
 /* Set features */
 enum ip_set_feature {
        IPSET_TYPE_IP_FLAG = 0,
@@ -223,6 +231,8 @@ enum ip_set_feature {
        IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
        IPSET_TYPE_IFACE_FLAG = 5,
        IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
+       IPSET_TYPE_NOMATCH_FLAG = 6,
+       IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
        /* Strictly speaking not a feature, but a flag for dumping:
         * this settype must be dumped last */
        IPSET_DUMP_LAST_FLAG = 7,
@@ -249,7 +259,7 @@ struct ip_set_type_variant {
         *              returns negative error code,
         *                      zero for no match/success to add/delete
         *                      positive for matching element */
-       int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+       int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
                    enum ipset_adt adt, const struct ip_set_adt_opt *opt);
 
@@ -424,7 +434,8 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
        return ret;
 }
 
-static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr)
+static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
+                                 const struct in6_addr *ipaddrptr)
 {
        struct nlattr *__nested = ipset_nest_start(skb, type);
        int ret;
index b114d35aea5e652c90b13ef94e1ef00f6d864c90..ef9acd3c84506fcd3525166501387445181e92c0 100644 (file)
@@ -137,50 +137,59 @@ htable_bits(u32 hashsize)
 #endif
 
 #define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
+#ifdef IP_SET_HASH_WITH_MULTI
+#define NETS_LENGTH(family)    (SET_HOST_MASK(family) + 1)
+#else
+#define NETS_LENGTH(family)    SET_HOST_MASK(family)
+#endif
 
 /* Network cidr size book keeping when the hash stores different
  * sized networks */
 static void
-add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
 {
-       u8 i;
-
-       ++h->nets[cidr-1].nets;
-
-       pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+       int i, j;
 
-       if (h->nets[cidr-1].nets > 1)
-               return;
-
-       /* New cidr size */
-       for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
-               /* Add in increasing prefix order, so larger cidr first */
-               if (h->nets[i].cidr < cidr)
-                       swap(h->nets[i].cidr, cidr);
+       /* Add in increasing prefix order, so larger cidr first */
+       for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
+               if (j != -1)
+                       continue;
+               else if (h->nets[i].cidr < cidr)
+                       j = i;
+               else if (h->nets[i].cidr == cidr) {
+                       h->nets[i].nets++;
+                       return;
+               }
+       }
+       if (j != -1) {
+               for (; i > j; i--) {
+                       h->nets[i].cidr = h->nets[i - 1].cidr;
+                       h->nets[i].nets = h->nets[i - 1].nets;
+               }
        }
-       if (i < host_mask)
-               h->nets[i].cidr = cidr;
+       h->nets[i].cidr = cidr;
+       h->nets[i].nets = 1;
 }
 
 static void
-del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
 {
-       u8 i;
-
-       --h->nets[cidr-1].nets;
+       u8 i, j;
 
-       pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+       for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++)
+               ;
+       h->nets[i].nets--;
 
-       if (h->nets[cidr-1].nets != 0)
+       if (h->nets[i].nets != 0)
                return;
 
-       /* All entries with this cidr size deleted, so cleanup h->cidr[] */
-       for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
-               if (h->nets[i].cidr == cidr)
-                       h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+       for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) {
+               h->nets[j].cidr = h->nets[j + 1].cidr;
+               h->nets[j].nets = h->nets[j + 1].nets;
        }
-       h->nets[i - 1].cidr = 0;
 }
+#else
+#define NETS_LENGTH(family)            0
 #endif
 
 /* Destroy the hashtable part of the set */
@@ -202,14 +211,14 @@ ahash_destroy(struct htable *t)
 
 /* Calculate the actual memory size of the set data */
 static size_t
-ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 nets_length)
 {
        u32 i;
        struct htable *t = h->table;
        size_t memsize = sizeof(*h)
                         + sizeof(*t)
 #ifdef IP_SET_HASH_WITH_NETS
-                        + sizeof(struct ip_set_hash_nets) * host_mask
+                        + sizeof(struct ip_set_hash_nets) * nets_length
 #endif
                         + jhash_size(t->htable_bits) * sizeof(struct hbucket);
 
@@ -238,7 +247,7 @@ ip_set_hash_flush(struct ip_set *set)
        }
 #ifdef IP_SET_HASH_WITH_NETS
        memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
-                          * SET_HOST_MASK(set->family));
+                          * NETS_LENGTH(set->family));
 #endif
        h->elements = 0;
 }
@@ -271,9 +280,6 @@ ip_set_hash_destroy(struct ip_set *set)
 (jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval)      \
        & jhash_mask(htable_bits))
 
-#define CONCAT(a, b, c)                a##b##c
-#define TOKEN(a, b, c)         CONCAT(a, b, c)
-
 /* Type/family dependent function prototypes */
 
 #define type_pf_data_equal     TOKEN(TYPE, PF, _data_equal)
@@ -478,7 +484,7 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
        }
 
 #ifdef IP_SET_HASH_WITH_NETS
-       add_cidr(h, CIDR(d->cidr), HOST_MASK);
+       add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
        h->elements++;
 out:
@@ -513,7 +519,7 @@ type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
                n->pos--;
                h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-               del_cidr(h, CIDR(d->cidr), HOST_MASK);
+               del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
                if (n->pos + AHASH_INIT_SIZE < n->size) {
                        void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -546,10 +552,10 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
        const struct type_pf_elem *data;
        int i, j = 0;
        u32 key, multi = 0;
-       u8 host_mask = SET_HOST_MASK(set->family);
+       u8 nets_length = NETS_LENGTH(set->family);
 
        pr_debug("test by nets\n");
-       for (; j < host_mask && h->nets[j].cidr && !multi; j++) {
+       for (; j < nets_length && h->nets[j].nets && !multi; j++) {
                type_pf_data_netmask(d, h->nets[j].cidr);
                key = HKEY(d, h->initval, t->htable_bits);
                n = hbucket(t, key);
@@ -604,7 +610,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
        memsize = ahash_memsize(h, with_timeout(h->timeout)
                                        ? sizeof(struct type_pf_telem)
                                        : sizeof(struct type_pf_elem),
-                               set->family == AF_INET ? 32 : 128);
+                               NETS_LENGTH(set->family));
        read_unlock_bh(&set->lock);
 
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
@@ -690,7 +696,7 @@ nla_put_failure:
 }
 
 static int
-type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
+type_pf_kadt(struct ip_set *set, const struct sk_buff *skb,
             const struct xt_action_param *par,
             enum ipset_adt adt, const struct ip_set_adt_opt *opt);
 static int
@@ -783,7 +789,7 @@ type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
 
 /* Delete expired elements from the hashtable */
 static void
-type_pf_expire(struct ip_set_hash *h)
+type_pf_expire(struct ip_set_hash *h, u8 nets_length)
 {
        struct htable *t = h->table;
        struct hbucket *n;
@@ -798,7 +804,7 @@ type_pf_expire(struct ip_set_hash *h)
                        if (type_pf_data_expired(data)) {
                                pr_debug("expired %u/%u\n", i, j);
 #ifdef IP_SET_HASH_WITH_NETS
-                               del_cidr(h, CIDR(data->cidr), HOST_MASK);
+                               del_cidr(h, CIDR(data->cidr), nets_length);
 #endif
                                if (j != n->pos - 1)
                                        /* Not last one */
@@ -839,7 +845,7 @@ type_pf_tresize(struct ip_set *set, bool retried)
        if (!retried) {
                i = h->elements;
                write_lock_bh(&set->lock);
-               type_pf_expire(set->data);
+               type_pf_expire(set->data, NETS_LENGTH(set->family));
                write_unlock_bh(&set->lock);
                if (h->elements <  i)
                        return 0;
@@ -904,7 +910,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 
        if (h->elements >= h->maxelem)
                /* FIXME: when set is full, we slow down here */
-               type_pf_expire(h);
+               type_pf_expire(h, NETS_LENGTH(set->family));
        if (h->elements >= h->maxelem) {
                if (net_ratelimit())
                        pr_warning("Set %s is full, maxelem %u reached\n",
@@ -933,8 +939,8 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
        if (j != AHASH_MAX(h) + 1) {
                data = ahash_tdata(n, j);
 #ifdef IP_SET_HASH_WITH_NETS
-               del_cidr(h, CIDR(data->cidr), HOST_MASK);
-               add_cidr(h, CIDR(d->cidr), HOST_MASK);
+               del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
+               add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
                type_pf_data_copy(data, d);
                type_pf_data_timeout_set(data, timeout);
@@ -952,7 +958,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
        }
 
 #ifdef IP_SET_HASH_WITH_NETS
-       add_cidr(h, CIDR(d->cidr), HOST_MASK);
+       add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
        h->elements++;
 out:
@@ -986,7 +992,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
                n->pos--;
                h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-               del_cidr(h, CIDR(d->cidr), HOST_MASK);
+               del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
                if (n->pos + AHASH_INIT_SIZE < n->size) {
                        void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -1016,9 +1022,9 @@ type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
        struct hbucket *n;
        int i, j = 0;
        u32 key, multi = 0;
-       u8 host_mask = SET_HOST_MASK(set->family);
+       u8 nets_length = NETS_LENGTH(set->family);
 
-       for (; j < host_mask && h->nets[j].cidr && !multi; j++) {
+       for (; j < nets_length && h->nets[j].nets && !multi; j++) {
                type_pf_data_netmask(d, h->nets[j].cidr);
                key = HKEY(d, h->initval, t->htable_bits);
                n = hbucket(t, key);
@@ -1147,7 +1153,7 @@ type_pf_gc(unsigned long ul_set)
 
        pr_debug("called\n");
        write_lock_bh(&set->lock);
-       type_pf_expire(h);
+       type_pf_expire(h, NETS_LENGTH(set->family));
        write_unlock_bh(&set->lock);
 
        h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
index 0bb5a6976bf380fc3e2686f1014960f1ae44eb1f..4b59a15849592333cc09f107c3407f712a5360f3 100644 (file)
@@ -4,6 +4,7 @@
 
 extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
                                          enum ip_conntrack_info ctinfo,
+                                         unsigned int protoff,
                                          unsigned int matchoff,
                                          unsigned int matchlen,
                                          struct nf_conntrack_expect *exp);
index 3e3aa08980c31f7bac7aca80ff3d473c25f4bdcc..8faf3f792d13034179d64b82e9ecc57014c6c3bb 100644 (file)
@@ -18,13 +18,17 @@ enum nf_ct_ftp_type {
 
 #define FTP_PORT       21
 
+#define NF_CT_FTP_SEQ_PICKUP   (1 << 0)
+
 #define NUM_SEQ_TO_REMEMBER 2
 /* This structure exists only once per master */
 struct nf_ct_ftp_master {
        /* Valid seq positions for cmd matching after newline */
        u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
        /* 0 means seq_match_aft_nl not set */
-       int seq_aft_nl_num[IP_CT_DIR_MAX];
+       u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX];
+       /* pickup sequence tracking, useful for conntrackd */
+       u_int16_t flags[IP_CT_DIR_MAX];
 };
 
 struct nf_conntrack_expect;
@@ -34,6 +38,7 @@ struct nf_conntrack_expect;
 extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
                                       enum ip_conntrack_info ctinfo,
                                       enum nf_ct_ftp_type type,
+                                      unsigned int protoff,
                                       unsigned int matchoff,
                                       unsigned int matchlen,
                                       struct nf_conntrack_expect *exp);
index 26f9226ea72b18ff379e2d553ccdc00112c95617..f381020eee92835fa68ae19858e1a4ca0437138c 100644 (file)
@@ -36,12 +36,12 @@ extern void nf_conntrack_h245_expect(struct nf_conn *new,
                                     struct nf_conntrack_expect *this);
 extern void nf_conntrack_q931_expect(struct nf_conn *new,
                                     struct nf_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff *skb,
+extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  H245_TransportAddress *taddr,
                                  union nf_inet_addr *addr,
                                  __be16 port);
-extern int (*set_h225_addr_hook) (struct sk_buff *skb,
+extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  TransportAddress *taddr,
                                  union nf_inet_addr *addr,
@@ -49,40 +49,45 @@ extern int (*set_h225_addr_hook) (struct sk_buff *skb,
 extern int (*set_sig_addr_hook) (struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
-                                unsigned char **data,
+                                unsigned int protoff, unsigned char **data,
                                 TransportAddress *taddr, int count);
 extern int (*set_ras_addr_hook) (struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
-                                unsigned char **data,
+                                unsigned int protoff, unsigned char **data,
                                 TransportAddress *taddr, int count);
 extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
-                                unsigned char **data, int dataoff,
+                                unsigned int protoff, unsigned char **data,
+                                int dataoff,
                                 H245_TransportAddress *taddr,
                                 __be16 port, __be16 rtp_port,
                                 struct nf_conntrack_expect *rtp_exp,
                                 struct nf_conntrack_expect *rtcp_exp);
 extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             unsigned char **data, int dataoff,
                             H245_TransportAddress *taddr, __be16 port,
                             struct nf_conntrack_expect *exp);
 extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             unsigned char **data, int dataoff,
                             TransportAddress *taddr, __be16 port,
                             struct nf_conntrack_expect *exp);
 extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
                                       struct nf_conn *ct,
                                       enum ip_conntrack_info ctinfo,
+                                      unsigned int protoff,
                                       unsigned char **data, int dataoff,
                                       TransportAddress *taddr,
                                       __be16 port,
                                       struct nf_conntrack_expect *exp);
 extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             unsigned char **data, TransportAddress *taddr,
                             int idx, __be16 port,
                             struct nf_conntrack_expect *exp);
index 36282bf71b63f3afb68fe11f467ed4ff95d7c5fd..4bb9bae671763d211a3f7efe9115db4965e5c85b 100644 (file)
@@ -7,6 +7,7 @@
 
 extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
                                       enum ip_conntrack_info ctinfo,
+                                      unsigned int protoff,
                                       unsigned int matchoff,
                                       unsigned int matchlen,
                                       struct nf_conntrack_expect *exp);
index 3bbde0c3a8a62cae3a01bb060cf0ffddbf93287f..2ab2830316b730d3ca9f29572500b43371aad641 100644 (file)
@@ -303,12 +303,14 @@ struct nf_conntrack_expect;
 extern int
 (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
                             struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             struct PptpControlHeader *ctlh,
                             union pptp_ctrl_union *pptpReq);
 
 extern int
 (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
                            struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            struct PptpControlHeader *ctlh,
                            union pptp_ctrl_union *pptpReq);
 
index 89f2a627f3f086febbcf1a93ab0fe9f42b6eac30..387bdd02945d13555d79e0becd9cb0bc1fb5737f 100644 (file)
@@ -37,10 +37,12 @@ struct sdp_media_type {
 struct sip_handler {
        const char      *method;
        unsigned int    len;
-       int             (*request)(struct sk_buff *skb, unsigned int dataoff,
+       int             (*request)(struct sk_buff *skb, unsigned int protoff,
+                                  unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq);
-       int             (*response)(struct sk_buff *skb, unsigned int dataoff,
+       int             (*response)(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int cseq, unsigned int code);
 };
@@ -97,19 +99,20 @@ enum sip_header_types {
 enum sdp_header_types {
        SDP_HDR_UNSPEC,
        SDP_HDR_VERSION,
-       SDP_HDR_OWNER_IP4,
-       SDP_HDR_CONNECTION_IP4,
-       SDP_HDR_OWNER_IP6,
-       SDP_HDR_CONNECTION_IP6,
+       SDP_HDR_OWNER,
+       SDP_HDR_CONNECTION,
        SDP_HDR_MEDIA,
 };
 
 extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
+                                      unsigned int protoff,
                                       unsigned int dataoff,
                                       const char **dptr,
                                       unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off);
+extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
+                                         unsigned int protoff, s16 off);
 extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
+                                             unsigned int protoff,
                                              unsigned int dataoff,
                                              const char **dptr,
                                              unsigned int *datalen,
@@ -117,6 +120,7 @@ extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
                                              unsigned int matchoff,
                                              unsigned int matchlen);
 extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
+                                           unsigned int protoff,
                                            unsigned int dataoff,
                                            const char **dptr,
                                            unsigned int *datalen,
@@ -125,6 +129,7 @@ extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
                                            enum sdp_header_types term,
                                            const union nf_inet_addr *addr);
 extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
+                                           unsigned int protoff,
                                            unsigned int dataoff,
                                            const char **dptr,
                                            unsigned int *datalen,
@@ -132,12 +137,14 @@ extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
                                            unsigned int matchlen,
                                            u_int16_t port);
 extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
+                                              unsigned int protoff,
                                               unsigned int dataoff,
                                               const char **dptr,
                                               unsigned int *datalen,
                                               unsigned int sdpoff,
                                               const union nf_inet_addr *addr);
 extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
+                                            unsigned int protoff,
                                             unsigned int dataoff,
                                             const char **dptr,
                                             unsigned int *datalen,
index 8df2d13730b2b75fff7262c0cf144e3a13c5f84f..bf0cc373ffb6ae02487e74ba478c024c3be3b477 100644 (file)
@@ -22,4 +22,12 @@ struct nf_nat_ipv4_multi_range_compat {
        struct nf_nat_ipv4_range        range[1];
 };
 
+struct nf_nat_range {
+       unsigned int                    flags;
+       union nf_inet_addr              min_addr;
+       union nf_inet_addr              max_addr;
+       union nf_conntrack_man_proto    min_proto;
+       union nf_conntrack_man_proto    max_proto;
+};
+
 #endif /* _NETFILTER_NF_NAT_H */
index f649f7423ca2d97210b55a8756e33a7a7fc3eeb7..43bfe3e1685b21d7dcf5bbb254a269ec941e5523 100644 (file)
@@ -142,9 +142,13 @@ enum ctattr_tstamp {
 
 enum ctattr_nat {
        CTA_NAT_UNSPEC,
-       CTA_NAT_MINIP,
-       CTA_NAT_MAXIP,
+       CTA_NAT_V4_MINIP,
+#define CTA_NAT_MINIP CTA_NAT_V4_MINIP
+       CTA_NAT_V4_MAXIP,
+#define CTA_NAT_MAXIP CTA_NAT_V4_MAXIP
        CTA_NAT_PROTO,
+       CTA_NAT_V6_MINIP,
+       CTA_NAT_V6_MAXIP,
        __CTA_NAT_MAX
 };
 #define CTA_NAT_MAX (__CTA_NAT_MAX - 1)
index 3b1c1360aedfffb906c0b90c52dfc86a5a26db25..70ec8c2bc11a6c97cdf1866aa5e9b0743938feff 100644 (file)
@@ -44,6 +44,7 @@ enum nfqnl_attr_type {
        NFQA_PAYLOAD,                   /* opaque data payload */
        NFQA_CT,                        /* nf_conntrack_netlink.h */
        NFQA_CT_INFO,                   /* enum ip_conntrack_info */
+       NFQA_CAP_LEN,                   /* __u32 length of captured packet */
 
        __NFQA_MAX
 };
index 7c37fac576c440d34c650dbc352bcceded891808..0958860193963e3b4290c0e053932edc6274ea65 100644 (file)
@@ -17,6 +17,9 @@ enum {
        /* Match against local time (instead of UTC) */
        XT_TIME_LOCAL_TZ = 1 << 0,
 
+       /* treat timestart > timestop (e.g. 23:00-01:00) as single period */
+       XT_TIME_CONTIGUOUS = 1 << 1,
+
        /* Shortcuts */
        XT_TIME_ALL_MONTHDAYS = 0xFFFFFFFE,
        XT_TIME_ALL_WEEKDAYS  = 0xFE,
@@ -24,4 +27,6 @@ enum {
        XT_TIME_MAX_DAYTIME   = 24 * 60 * 60 - 1,
 };
 
+#define XT_TIME_ALL_FLAGS (XT_TIME_LOCAL_TZ|XT_TIME_CONTIGUOUS)
+
 #endif /* _XT_TIME_H */
index e2b12801378d8bd2e239a9516aea967328fe7ca4..b962dfc695ae0e5e5787627468bdff6911dd146d 100644 (file)
@@ -79,7 +79,6 @@ enum nf_ip_hook_priorities {
 
 #ifdef __KERNEL__
 extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern int ip_xfrm_me_harder(struct sk_buff *skb);
 extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
                                   unsigned int dataoff, u_int8_t protocol);
 #endif /*__KERNEL__*/
index bd095bc075e9f832cc8ac659034e6040e17014f7..b88c0058bf73ad02a02229606cca38e6611652dc 100644 (file)
@@ -1,6 +1,7 @@
 header-y += ip6_tables.h
 header-y += ip6t_HL.h
 header-y += ip6t_LOG.h
+header-y += ip6t_NPT.h
 header-y += ip6t_REJECT.h
 header-y += ip6t_ah.h
 header-y += ip6t_frag.h
diff --git a/include/linux/netfilter_ipv6/ip6t_NPT.h b/include/linux/netfilter_ipv6/ip6t_NPT.h
new file mode 100644 (file)
index 0000000..f763355
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef __NETFILTER_IP6T_NPT
+#define __NETFILTER_IP6T_NPT
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+
+struct ip6t_npt_tginfo {
+       union nf_inet_addr      src_pfx;
+       union nf_inet_addr      dst_pfx;
+       __u8                    src_pfx_len;
+       __u8                    dst_pfx_len;
+       /* Used internally by the kernel */
+       __sum16                 adjustment;
+};
+
+#endif /* __NETFILTER_IP6T_NPT */
index c9fdde2bc73f422951da472cf1636da900a93ad2..f80c56ac4d82a822efe44f0dbbd87cfba819dd92 100644 (file)
@@ -153,6 +153,8 @@ struct nlattr {
 
 #include <linux/capability.h>
 #include <linux/skbuff.h>
+#include <linux/export.h>
+#include <net/scm.h>
 
 struct net;
 
@@ -162,8 +164,8 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 }
 
 struct netlink_skb_parms {
-       struct ucred            creds;          /* Skb credentials      */
-       __u32                   pid;
+       struct scm_creds        creds;          /* Skb credentials      */
+       __u32                   portid;
        __u32                   dst_group;
        struct sock             *ssk;
 };
@@ -175,17 +177,27 @@ struct netlink_skb_parms {
 extern void netlink_table_grab(void);
 extern void netlink_table_ungrab(void);
 
+#define NL_CFG_F_NONROOT_RECV  (1 << 0)
+#define NL_CFG_F_NONROOT_SEND  (1 << 1)
+
 /* optional Netlink kernel configuration parameters */
 struct netlink_kernel_cfg {
        unsigned int    groups;
+       unsigned int    flags;
        void            (*input)(struct sk_buff *skb);
        struct mutex    *cb_mutex;
        void            (*bind)(int group);
 };
 
-extern struct sock *netlink_kernel_create(struct net *net, int unit,
-                                         struct module *module,
-                                         struct netlink_kernel_cfg *cfg);
+extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+                                           struct module *module,
+                                           struct netlink_kernel_cfg *cfg);
+static inline struct sock *
+netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
+{
+       return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
+}
+
 extern void netlink_kernel_release(struct sock *sk);
 extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
 extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
@@ -193,14 +205,14 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
 extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
-extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
+extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
                             __u32 group, gfp_t allocation);
 extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
-       __u32 pid, __u32 group, gfp_t allocation,
+       __u32 portid, __u32 group, gfp_t allocation,
        int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
        void *filter_data);
-extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
+extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
 extern int netlink_register_notifier(struct notifier_block *nb);
 extern int netlink_unregister_notifier(struct notifier_block *nb);
 
@@ -241,12 +253,12 @@ struct netlink_callback {
 
 struct netlink_notify {
        struct net *net;
-       int pid;
+       int portid;
        int protocol;
 };
 
 struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags);
+__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
 
 struct netlink_dump_control {
        int (*dump)(struct sk_buff *skb, struct netlink_callback *);
@@ -259,11 +271,6 @@ extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              struct netlink_dump_control *control);
 
-
-#define NL_NONROOT_RECV 0x1
-#define NL_NONROOT_SEND 0x2
-extern void netlink_set_nonroot(int protocol, unsigned flag);
-
 #endif /* __KERNEL__ */
 
 #endif /* __LINUX_NETLINK_H */
index 6189f27e305b551a3d47bbbc8d9935041917e246..d908d17da56d677e62a07011c852cc38cdf824cf 100644 (file)
@@ -183,4 +183,15 @@ struct sockaddr_nfc_llcp {
 
 #define NFC_HEADER_SIZE 1
 
+/**
+ * Pseudo-header info for raw socket packets
+ * First byte is the adapter index
+ * Second byte contains flags
+ *  - 0x01 - Direction (0=RX, 1=TX)
+ *  - 0x02-0x80 - Reserved
+ **/
+#define NFC_LLCP_RAW_HEADER_SIZE       2
+#define NFC_LLCP_DIRECTION_RX          0x00
+#define NFC_LLCP_DIRECTION_TX          0x01
+
 #endif /*__LINUX_NFC_H */
index 2f38788064032c9ca40bb59208ac3a0519acc4ec..7df9b500c80493e944591a2192b07253f1b42669 100644 (file)
  *     %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
  *     %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
  *
+ * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
+ *     its %NL80211_ATTR_WDEV identifier. It must have been created with
+ *     %NL80211_CMD_NEW_INTERFACE previously. After it has been started, the
+ *     P2P Device can be used for P2P operations, e.g. remain-on-channel and
+ *     public action frame TX.
+ * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by
+ *     its %NL80211_ATTR_WDEV identifier.
+ *
+ * @NL80211_CMD_CONN_FAILED: connection request to an AP failed; used to
+ *     notify userspace that AP has rejected the connection request from a
+ *     station, due to particular reason. %NL80211_ATTR_CONN_FAILED_REASON
+ *     is used for this.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -708,6 +721,11 @@ enum nl80211_commands {
 
        NL80211_CMD_CH_SWITCH_NOTIFY,
 
+       NL80211_CMD_START_P2P_DEVICE,
+       NL80211_CMD_STOP_P2P_DEVICE,
+
+       NL80211_CMD_CONN_FAILED,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1251,6 +1269,10 @@ enum nl80211_commands {
  *     was used to provide the hint. For the different types of
  *     allowed user regulatory hints see nl80211_user_reg_hint_type.
  *
+ * @NL80211_ATTR_CONN_FAILED_REASON: The reason for which AP has rejected
+ *     the connection request from a station. nl80211_connect_failed_reason
+ *     enum has different reasons of connection failure.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1506,6 +1528,8 @@ enum nl80211_attrs {
 
        NL80211_ATTR_USER_REG_HINT_TYPE,
 
+       NL80211_ATTR_CONN_FAILED_REASON,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1575,6 +1599,10 @@ enum nl80211_attrs {
  * @NL80211_IFTYPE_MESH_POINT: mesh point
  * @NL80211_IFTYPE_P2P_CLIENT: P2P client
  * @NL80211_IFTYPE_P2P_GO: P2P group owner
+ * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev
+ *     and therefore can't be created in the normal ways, use the
+ *     %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
+ *     commands to create and destroy one
  * @NL80211_IFTYPE_MAX: highest interface type number currently defined
  * @NUM_NL80211_IFTYPES: number of defined interface types
  *
@@ -1593,6 +1621,7 @@ enum nl80211_iftype {
        NL80211_IFTYPE_MESH_POINT,
        NL80211_IFTYPE_P2P_CLIENT,
        NL80211_IFTYPE_P2P_GO,
+       NL80211_IFTYPE_P2P_DEVICE,
 
        /* keep last */
        NUM_NL80211_IFTYPES,
@@ -2994,12 +3023,18 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *     to work properly to suppport receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
+ *     P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
+ *     in the interface combinations, even when it's only used for scan
+ *     and remain-on-channel. This could be due to, for example, the
+ *     remain-on-channel implementation requiring a channel context.
  */
 enum nl80211_feature_flags {
-       NL80211_FEATURE_SK_TX_STATUS    = 1 << 0,
-       NL80211_FEATURE_HT_IBSS         = 1 << 1,
-       NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
-       NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
+       NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
+       NL80211_FEATURE_HT_IBSS                         = 1 << 1,
+       NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
+       NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
 };
 
 /**
@@ -3023,4 +3058,15 @@ enum nl80211_probe_resp_offload_support_attr {
        NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U =     1<<3,
 };
 
+/**
+ * enum nl80211_connect_failed_reason - connection request failed reasons
+ * @NL80211_CONN_FAIL_MAX_CLIENTS: Maximum number of clients that can be
+ *     handled by the AP is reached.
+ * @NL80211_CONN_FAIL_BLOCKED_CLIENT: Client's MAC is in the AP's blocklist.
+ */
+enum nl80211_connect_failed_reason {
+       NL80211_CONN_FAIL_MAX_CLIENTS,
+       NL80211_CONN_FAIL_BLOCKED_CLIENT,
+};
+
 #endif /* __LINUX_NL80211_H */
index 912c27a0f7eeede26c7659956b11c70f7255f78e..6ef49b803efb1fee5fdb9bdc02d6b34a4d445a89 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 
+#ifdef CONFIG_OF
 extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
 extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
 extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -24,4 +25,36 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
+#else /* CONFIG_OF */
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+{
+       return -ENOSYS;
+}
+
+struct phy_device *of_phy_find_device(struct device_node *phy_np)
+{
+       return NULL;
+}
+
+struct phy_device *of_phy_connect(struct net_device *dev,
+                                        struct device_node *phy_np,
+                                        void (*hndlr)(struct net_device *),
+                                        u32 flags, phy_interface_t iface)
+{
+       return NULL;
+}
+
+struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
+                                        void (*hndlr)(struct net_device *),
+                                        phy_interface_t iface)
+{
+       return NULL;
+}
+
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 #endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/packet_diag.h b/include/linux/packet_diag.h
new file mode 100644 (file)
index 0000000..93f5fa9
--- /dev/null
@@ -0,0 +1,72 @@
+#ifndef __PACKET_DIAG_H__
+#define __PACKET_DIAG_H__
+
+#include <linux/types.h>
+
+struct packet_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u16   pad;
+       __u32   pdiag_ino;
+       __u32   pdiag_show;
+       __u32   pdiag_cookie[2];
+};
+
+#define PACKET_SHOW_INFO       0x00000001 /* Basic packet_sk information */
+#define PACKET_SHOW_MCLIST     0x00000002 /* A set of packet_diag_mclist-s */
+#define PACKET_SHOW_RING_CFG   0x00000004 /* Rings configuration parameters */
+#define PACKET_SHOW_FANOUT     0x00000008
+
+struct packet_diag_msg {
+       __u8    pdiag_family;
+       __u8    pdiag_type;
+       __u16   pdiag_num;
+
+       __u32   pdiag_ino;
+       __u32   pdiag_cookie[2];
+};
+
+enum {
+       PACKET_DIAG_INFO,
+       PACKET_DIAG_MCLIST,
+       PACKET_DIAG_RX_RING,
+       PACKET_DIAG_TX_RING,
+       PACKET_DIAG_FANOUT,
+
+       PACKET_DIAG_MAX,
+};
+
+struct packet_diag_info {
+       __u32   pdi_index;
+       __u32   pdi_version;
+       __u32   pdi_reserve;
+       __u32   pdi_copy_thresh;
+       __u32   pdi_tstamp;
+       __u32   pdi_flags;
+
+#define PDI_RUNNING    0x1
+#define PDI_AUXDATA    0x2
+#define PDI_ORIGDEV    0x4
+#define PDI_VNETHDR    0x8
+#define PDI_LOSS       0x10
+};
+
+struct packet_diag_mclist {
+       __u32   pdmc_index;
+       __u32   pdmc_count;
+       __u16   pdmc_type;
+       __u16   pdmc_alen;
+       __u8    pdmc_addr[MAX_ADDR_LEN];
+};
+
+struct packet_diag_ring {
+       __u32   pdr_block_size;
+       __u32   pdr_block_nr;
+       __u32   pdr_frame_size;
+       __u32   pdr_frame_nr;
+       __u32   pdr_retire_tmo;
+       __u32   pdr_sizeof_priv;
+       __u32   pdr_features;
+};
+
+#endif
index 94048547f29ad424e27013c4eaa17ca6ca28c422..0cc45ae1afd54aca12247724c73972a5ab8f6cc4 100644 (file)
@@ -116,5 +116,14 @@ static inline void pps_get_ts(struct pps_event_time *ts)
 
 #endif /* CONFIG_NTP_PPS */
 
+/* Subtract known time delay from PPS event time(s) */
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+{
+       ts->ts_real = timespec_sub(ts->ts_real, delta);
+#ifdef CONFIG_NTP_PPS
+       ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+#endif
+}
+
 #endif /* LINUX_PPS_KERNEL_H */
 
index 945704c2ed65307bb8ec47c39cbd24d364b40fa3..f2dc6d8fc680f7ae02596558ce4571a675fb3bce 100644 (file)
@@ -21,6 +21,8 @@
 #ifndef _PTP_CLOCK_KERNEL_H_
 #define _PTP_CLOCK_KERNEL_H_
 
+#include <linux/device.h>
+#include <linux/pps_kernel.h>
 #include <linux/ptp_clock.h>
 
 
@@ -40,7 +42,9 @@ struct ptp_clock_request {
  * struct ptp_clock_info - decribes a PTP hardware clock
  *
  * @owner:     The clock driver should set to THIS_MODULE.
- * @name:      A short name to identify the clock.
+ * @name:      A short "friendly name" to identify the clock and to
+ *             help distinguish PHY based devices from MAC based ones.
+ *             The string is not meant to be a unique id.
  * @max_adj:   The maximum possible frequency adjustment, in parts per billon.
  * @n_alarm:   The number of programmable alarms.
  * @n_ext_ts:  The number of external time stamp channels.
@@ -92,10 +96,12 @@ struct ptp_clock;
 /**
  * ptp_clock_register() - register a PTP hardware clock driver
  *
- * @info:  Structure describing the new clock.
+ * @info:   Structure describing the new clock.
+ * @parent: Pointer to the parent device of the new clock.
  */
 
-extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info);
+extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+                                           struct device *parent);
 
 /**
  * ptp_clock_unregister() - unregister a PTP hardware clock driver
@@ -110,6 +116,7 @@ enum ptp_clock_events {
        PTP_CLOCK_ALARM,
        PTP_CLOCK_EXTTS,
        PTP_CLOCK_PPS,
+       PTP_CLOCK_PPSUSR,
 };
 
 /**
@@ -117,13 +124,17 @@ enum ptp_clock_events {
  *
  * @type:  One of the ptp_clock_events enumeration values.
  * @index: Identifies the source of the event.
- * @timestamp: When the event occured.
+ * @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
  */
 
 struct ptp_clock_event {
        int type;
        int index;
-       u64 timestamp;
+       union {
+               u64 timestamp;
+               struct pps_event_time pps_times;
+       };
 };
 
 /**
index 6fdf02737e9decd563ac8616fb510d4951c11515..0ec590bb361119ea10825c211f376d902ad294be 100644 (file)
@@ -354,6 +354,37 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
 }
 #endif /* RFKILL || RFKILL_MODULE */
 
+
+#ifdef CONFIG_RFKILL_LEDS
+/**
+ * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
+ * This function might return a NULL pointer if registering of the
+ * LED trigger failed. Use this as "default_trigger" for the LED.
+ */
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_led_trigger_name -- set the LED trigger name
+ * @rfkill: rfkill struct
+ * @name: LED trigger name
+ *
+ * This function sets the LED trigger name of the radio LED
+ * trigger that rfkill creates. It is optional, but if called
+ * must be called before rfkill_register() to be effective.
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
+#else
+static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+       return NULL;
+}
+
+static inline void
+rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+}
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* RFKILL_H */
index d23ca6245d54331466d4ad7c32e2533bf4cdcae8..9c5612f0374b3b419b70066dd61599b32d6b0b9d 100644 (file)
@@ -1526,6 +1526,9 @@ struct task_struct {
         * cache last used pipe for splice
         */
        struct pipe_inode_info *splice_pipe;
+
+       struct page_frag task_frag;
+
 #ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
 #endif
index 7632c87da2c9fd66eab61b6c7415d5669b7aeefd..b33a3a1f205e45c747c9125a5b5920365ca2f8be 100644 (file)
@@ -846,13 +846,16 @@ static inline int skb_shared(const struct sk_buff *skb)
  *
  *     NULL is returned on a memory allocation failure.
  */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-                                             gfp_t pri)
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 {
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);
-               kfree_skb(skb);
+
+               if (likely(nskb))
+                       consume_skb(skb);
+               else
+                       kfree_skb(skb);
                skb = nskb;
        }
        return skb;
index 00bc189cb3955b57384bbcf3e458b3b593c94774..fdfba235f9f1a9bb6b8fc03958ae1982404dd125 100644 (file)
 enum
 {
        IPSTATS_MIB_NUM = 0,
+/* frequently written fields in fast path, kept in same cache line */
        IPSTATS_MIB_INPKTS,                     /* InReceives */
+       IPSTATS_MIB_INOCTETS,                   /* InOctets */
+       IPSTATS_MIB_INDELIVERS,                 /* InDelivers */
+       IPSTATS_MIB_OUTFORWDATAGRAMS,           /* OutForwDatagrams */
+       IPSTATS_MIB_OUTPKTS,                    /* OutRequests */
+       IPSTATS_MIB_OUTOCTETS,                  /* OutOctets */
+/* other fields */
        IPSTATS_MIB_INHDRERRORS,                /* InHdrErrors */
        IPSTATS_MIB_INTOOBIGERRORS,             /* InTooBigErrors */
        IPSTATS_MIB_INNOROUTES,                 /* InNoRoutes */
@@ -26,9 +33,6 @@ enum
        IPSTATS_MIB_INUNKNOWNPROTOS,            /* InUnknownProtos */
        IPSTATS_MIB_INTRUNCATEDPKTS,            /* InTruncatedPkts */
        IPSTATS_MIB_INDISCARDS,                 /* InDiscards */
-       IPSTATS_MIB_INDELIVERS,                 /* InDelivers */
-       IPSTATS_MIB_OUTFORWDATAGRAMS,           /* OutForwDatagrams */
-       IPSTATS_MIB_OUTPKTS,                    /* OutRequests */
        IPSTATS_MIB_OUTDISCARDS,                /* OutDiscards */
        IPSTATS_MIB_OUTNOROUTES,                /* OutNoRoutes */
        IPSTATS_MIB_REASMTIMEOUT,               /* ReasmTimeout */
@@ -42,8 +46,6 @@ enum
        IPSTATS_MIB_OUTMCASTPKTS,               /* OutMcastPkts */
        IPSTATS_MIB_INBCASTPKTS,                /* InBcastPkts */
        IPSTATS_MIB_OUTBCASTPKTS,               /* OutBcastPkts */
-       IPSTATS_MIB_INOCTETS,                   /* InOctets */
-       IPSTATS_MIB_OUTOCTETS,                  /* OutOctets */
        IPSTATS_MIB_INMCASTOCTETS,              /* InMcastOctets */
        IPSTATS_MIB_OUTMCASTOCTETS,             /* OutMcastOctets */
        IPSTATS_MIB_INBCASTOCTETS,              /* InBcastOctets */
@@ -239,6 +241,10 @@ enum
        LINUX_MIB_TCPCHALLENGEACK,              /* TCPChallengeACK */
        LINUX_MIB_TCPSYNCHALLENGE,              /* TCPSYNChallenge */
        LINUX_MIB_TCPFASTOPENACTIVE,            /* TCPFastOpenActive */
+       LINUX_MIB_TCPFASTOPENPASSIVE,           /* TCPFastOpenPassive*/
+       LINUX_MIB_TCPFASTOPENPASSIVEFAIL,       /* TCPFastOpenPassiveFail */
+       LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,    /* TCPFastOpenListenOverflow */
+       LINUX_MIB_TCPFASTOPENCOOKIEREQD,        /* TCPFastOpenCookieReqd */
        __LINUX_MIB_MAX
 };
 
index 1a6b0045b06b63a616946bd421ce3f7f8e530160..c2b02a5c86ae0bb03798d9221c6199dc8be63d4b 100644 (file)
 #define SSB_CHIPCO_FLASHCTL_ST_SE      0x02D8          /* Sector Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_BE      0x00C7          /* Bulk Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_DP      0x00B9          /* Deep Power-down */
-#define SSB_CHIPCO_FLASHCTL_ST_RSIG    0x03AB          /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_RES     0x03AB          /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_CSA     0x1000          /* Keep chip select asserted */
+#define SSB_CHIPCO_FLASHCTL_ST_SSE     0x0220          /* Sub-sector Erase */
 
 /* Status register bits for ST flashes */
 #define SSB_CHIPCO_FLASHSTA_ST_WIP     0x01            /* Write In Progress */
index b69bdb1e08b674f81b372a0dcab0ed2b5ec47038..a1547ea3920d226cf56286b119c0e72d7a5b8712 100644 (file)
@@ -76,7 +76,6 @@
 /* Platfrom data for platform device structure's platform_data field */
 
 struct stmmac_mdio_bus_data {
-       int bus_id;
        int (*phy_reset)(void *priv);
        unsigned int phy_mask;
        int *irqs;
index eb125a4c30b334b63ca9d06983a125360839d90e..67c789ae719c7f6216870db74df3afc0223de66b 100644 (file)
@@ -110,6 +110,7 @@ enum {
 #define TCP_REPAIR_QUEUE       20
 #define TCP_QUEUE_SEQ          21
 #define TCP_REPAIR_OPTIONS     22
+#define TCP_FASTOPEN           23      /* Enable FastOpen on listeners */
 
 struct tcp_repair_opt {
        __u32   opt_code;
@@ -246,6 +247,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 /* TCP Fast Open */
 #define TCP_FASTOPEN_COOKIE_MIN        4       /* Min Fast Open Cookie size in bytes */
 #define TCP_FASTOPEN_COOKIE_MAX        16      /* Max Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_SIZE 8     /* the size employed by this impl. */
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
@@ -312,9 +314,14 @@ struct tcp_request_sock {
        /* Only used by TCP MD5 Signature so far. */
        const struct tcp_request_sock_ops *af_specific;
 #endif
+       struct sock                     *listener; /* needed for TFO */
        u32                             rcv_isn;
        u32                             snt_isn;
        u32                             snt_synack; /* synack sent time */
+       u32                             rcv_nxt; /* the ack # by SYNACK. For
+                                                 * FastOpen it's the seq#
+                                                 * after data-in-SYN.
+                                                 */
 };
 
 static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -505,14 +512,18 @@ struct tcp_sock {
        struct tcp_md5sig_info  __rcu *md5sig_info;
 #endif
 
-/* TCP fastopen related information */
-       struct tcp_fastopen_request *fastopen_req;
-
        /* When the cookie options are generated and exchanged, then this
         * object holds a reference to them (cookie_values->kref).  Also
         * contains related tcp_cookie_transactions fields.
         */
        struct tcp_cookie_values  *cookie_values;
+
+/* TCP fastopen related information */
+       struct tcp_fastopen_request *fastopen_req;
+       /* fastopen_rsk points to request_sock that resulted in this big
+        * socket. Used to retransmit SYNACKs etc.
+        */
+       struct request_sock *fastopen_rsk;
 };
 
 enum tsq_flags {
@@ -552,6 +563,38 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
        return (struct tcp_timewait_sock *)sk;
 }
 
+static inline bool tcp_passive_fastopen(const struct sock *sk)
+{
+       return (sk->sk_state == TCP_SYN_RECV &&
+               tcp_sk(sk)->fastopen_rsk != NULL);
+}
+
+static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
+{
+       return foc->len != -1;
+}
+
+extern void tcp_sock_destruct(struct sock *sk);
+
+static inline int fastopen_init_queue(struct sock *sk, int backlog)
+{
+       struct request_sock_queue *queue =
+           &inet_csk(sk)->icsk_accept_queue;
+
+       if (queue->fastopenq == NULL) {
+               queue->fastopenq = kzalloc(
+                   sizeof(struct fastopen_queue),
+                   sk->sk_allocation);
+               if (queue->fastopenq == NULL)
+                       return -ENOMEM;
+
+               sk->sk_destruct = tcp_sock_destruct;
+               spin_lock_init(&queue->fastopenq->lock);
+       }
+       queue->fastopenq->max_qlen = backlog;
+       return 0;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_TCP_H */
diff --git a/include/linux/tcp_metrics.h b/include/linux/tcp_metrics.h
new file mode 100644 (file)
index 0000000..cb5157b
--- /dev/null
@@ -0,0 +1,54 @@
+/* tcp_metrics.h - TCP Metrics Interface */
+
+#ifndef _LINUX_TCP_METRICS_H
+#define _LINUX_TCP_METRICS_H
+
+#include <linux/types.h>
+
+/* NETLINK_GENERIC related info
+ */
+#define TCP_METRICS_GENL_NAME          "tcp_metrics"
+#define TCP_METRICS_GENL_VERSION       0x1
+
+enum tcp_metric_index {
+       TCP_METRIC_RTT,
+       TCP_METRIC_RTTVAR,
+       TCP_METRIC_SSTHRESH,
+       TCP_METRIC_CWND,
+       TCP_METRIC_REORDERING,
+
+       /* Always last.  */
+       __TCP_METRIC_MAX,
+};
+
+#define TCP_METRIC_MAX (__TCP_METRIC_MAX - 1)
+
+enum {
+       TCP_METRICS_ATTR_UNSPEC,
+       TCP_METRICS_ATTR_ADDR_IPV4,             /* u32 */
+       TCP_METRICS_ATTR_ADDR_IPV6,             /* binary */
+       TCP_METRICS_ATTR_AGE,                   /* msecs */
+       TCP_METRICS_ATTR_TW_TSVAL,              /* u32, raw, rcv tsval */
+       TCP_METRICS_ATTR_TW_TS_STAMP,           /* s32, sec age */
+       TCP_METRICS_ATTR_VALS,                  /* nested +1, u32 */
+       TCP_METRICS_ATTR_FOPEN_MSS,             /* u16 */
+       TCP_METRICS_ATTR_FOPEN_SYN_DROPS,       /* u16, count of drops */
+       TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,     /* msecs age */
+       TCP_METRICS_ATTR_FOPEN_COOKIE,          /* binary */
+
+       __TCP_METRICS_ATTR_MAX,
+};
+
+#define TCP_METRICS_ATTR_MAX   (__TCP_METRICS_ATTR_MAX - 1)
+
+enum {
+       TCP_METRICS_CMD_UNSPEC,
+       TCP_METRICS_CMD_GET,
+       TCP_METRICS_CMD_DEL,
+
+       __TCP_METRICS_CMD_MAX,
+};
+
+#define TCP_METRICS_CMD_MAX    (__TCP_METRICS_CMD_MAX - 1)
+
+#endif /* _LINUX_TCP_METRICS_H */
index c98928420100962e005aabc73ecfd9b73d395fea..0b1e3f218a36b95ff1f8163e89a365ae9d705dc6 100644 (file)
@@ -89,8 +89,8 @@
 
 #define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
 #define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* obsoleted */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_NODES     0x4009    /* obsoleted */
 #define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
 #define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* obsoleted */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_NODES     0x8009    /* obsoleted */
index 089a09d001d12be5c7c99e32ac66614b0125bc82..9e63e76b20e7e0ce4b60aa5daf0e113b6985ae16 100644 (file)
@@ -78,7 +78,7 @@ extern struct inet6_ifaddr      *ipv6_get_ifaddr(struct net *net,
                                                 int strict);
 
 extern int                     ipv6_dev_get_saddr(struct net *net,
-                                              struct net_device *dev,
+                                              const struct net_device *dev,
                                               const struct in6_addr *daddr,
                                               unsigned int srcprefs,
                                               struct in6_addr *saddr);
index 7f7df93f37cd0d3b3259775ebf6ab23743b3c564..b630dae03411ae69694e842d75108a254c0584aa 100644 (file)
@@ -3,6 +3,7 @@
 #define _ARP_H
 
 #include <linux/if_arp.h>
+#include <linux/hash.h>
 #include <net/neighbour.h>
 
 
@@ -10,7 +11,7 @@ extern struct neigh_table arp_tbl;
 
 static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
 {
-       u32 val = key ^ dev->ifindex;
+       u32 val = key ^ hash32_ptr(dev);
 
        return val * hash_rnd;
 }
index 565d4bee1e493bbaa145f5e3621bc27e4295727f..ede036977ae8b6debe3ee4560f7e93905bd92f9a 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/poll.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #ifndef AF_BLUETOOTH
 #define AF_BLUETOOTH   31
@@ -202,6 +203,10 @@ enum {
 struct bt_sock_list {
        struct hlist_head head;
        rwlock_t          lock;
+#ifdef CONFIG_PROC_FS
+        struct file_operations   fops;
+        int (* custom_seq_show)(struct seq_file *, void *);
+#endif
 };
 
 int  bt_sock_register(int proto, const struct net_proto_family *ops);
@@ -292,6 +297,11 @@ extern void hci_sock_cleanup(void);
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);
 
+extern int  bt_procfs_init(struct module* module, struct net *net, const char *name,
+                          struct bt_sock_list* sk_list,
+                          int (* seq_show)(struct seq_file *, void *));
+extern void bt_procfs_cleanup(struct net *net, const char *name);
+
 extern struct dentry *bt_debugfs;
 
 int l2cap_init(void);
index ccd723e0f783e34a14a382e116a72e2a0369becb..76b2b6bdcf36a281d558925a2a7a707ca2e1dddf 100644 (file)
 /* First BR/EDR Controller shall have ID = 0 */
 #define HCI_BREDR_ID   0
 
+/* AMP controller status */
+#define AMP_CTRL_POWERED_DOWN                  0x00
+#define AMP_CTRL_BLUETOOTH_ONLY                        0x01
+#define AMP_CTRL_NO_CAPACITY                   0x02
+#define AMP_CTRL_LOW_CAPACITY                  0x03
+#define AMP_CTRL_MEDIUM_CAPACITY               0x04
+#define AMP_CTRL_HIGH_CAPACITY                 0x05
+#define AMP_CTRL_FULL_CAPACITY                 0x06
+
 /* HCI device quirks */
 enum {
        HCI_QUIRK_RESET_ON_CLOSE,
@@ -293,8 +302,11 @@ enum {
 
 /* ---- HCI Error Codes ---- */
 #define HCI_ERROR_AUTH_FAILURE         0x05
+#define HCI_ERROR_CONNECTION_TIMEOUT   0x08
 #define HCI_ERROR_REJ_BAD_ADDR         0x0f
 #define HCI_ERROR_REMOTE_USER_TERM     0x13
+#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
+#define HCI_ERROR_REMOTE_POWER_OFF     0x15
 #define HCI_ERROR_LOCAL_HOST_TERM      0x16
 #define HCI_ERROR_PAIRING_NOT_ALLOWED  0x18
 
@@ -1237,6 +1249,24 @@ struct hci_ev_simple_pair_complete {
        bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_EV_USER_PASSKEY_NOTIFY     0x3b
+struct hci_ev_user_passkey_notify {
+       bdaddr_t        bdaddr;
+       __le32          passkey;
+} __packed;
+
+#define HCI_KEYPRESS_STARTED           0
+#define HCI_KEYPRESS_ENTERED           1
+#define HCI_KEYPRESS_ERASED            2
+#define HCI_KEYPRESS_CLEARED           3
+#define HCI_KEYPRESS_COMPLETED         4
+
+#define HCI_EV_KEYPRESS_NOTIFY         0x3c
+struct hci_ev_keypress_notify {
+       bdaddr_t        bdaddr;
+       __u8            type;
+} __packed;
+
 #define HCI_EV_REMOTE_HOST_FEATURES    0x3d
 struct hci_ev_remote_host_features {
        bdaddr_t bdaddr;
@@ -1295,6 +1325,8 @@ struct hci_ev_num_comp_blocks {
 } __packed;
 
 /* Low energy meta events */
+#define LE_CONN_ROLE_MASTER    0x00
+
 #define HCI_EV_LE_CONN_COMPLETE                0x01
 struct hci_ev_le_conn_complete {
        __u8     status;
index 475b8c04ba52c01530f9a17a50f2f895211c7e2b..e7d454609881a30d929ec8aa613390df157c8b43 100644 (file)
@@ -115,12 +115,6 @@ struct oob_data {
        u8 randomizer[16];
 };
 
-struct adv_entry {
-       struct list_head list;
-       bdaddr_t bdaddr;
-       u8 bdaddr_type;
-};
-
 struct le_scan_params {
        u8 type;
        u16 interval;
@@ -309,6 +303,8 @@ struct hci_conn {
        __u8            pin_length;
        __u8            enc_key_size;
        __u8            io_capability;
+       __u32           passkey_notify;
+       __u8            passkey_entered;
        __u16           disc_timeout;
        unsigned long   flags;
 
@@ -356,16 +352,16 @@ extern rwlock_t hci_cb_list_lock;
 
 /* ----- HCI interface to upper protocols ----- */
 extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
 extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
 extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
 extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
                              u16 flags);
 
 extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
 extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
@@ -434,15 +430,6 @@ static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
               test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
 }
 
-static inline void hci_conn_hash_init(struct hci_dev *hdev)
-{
-       struct hci_conn_hash *h = &hdev->conn_hash;
-       INIT_LIST_HEAD(&h->list);
-       h->acl_num = 0;
-       h->sco_num = 0;
-       h->le_num = 0;
-}
-
 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
@@ -557,9 +544,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
        return NULL;
 }
 
-void hci_acl_connect(struct hci_conn *conn);
 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
-void hci_add_sco(struct hci_conn *conn, __u16 handle);
 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
@@ -569,7 +554,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
 struct hci_chan *hci_chan_create(struct hci_conn *conn);
-int hci_chan_del(struct hci_chan *chan);
+void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -587,8 +572,7 @@ void hci_conn_put_device(struct hci_conn *conn);
 
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
-       BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-              atomic_read(&conn->refcnt) + 1);
+       BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
        atomic_inc(&conn->refcnt);
        cancel_delayed_work(&conn->disc_work);
@@ -596,8 +580,7 @@ static inline void hci_conn_hold(struct hci_conn *conn)
 
 static inline void hci_conn_put(struct hci_conn *conn)
 {
-       BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-              atomic_read(&conn->refcnt) - 1);
+       BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
@@ -622,11 +605,17 @@ static inline void hci_conn_put(struct hci_conn *conn)
 /* ----- HCI Devices ----- */
 static inline void hci_dev_put(struct hci_dev *d)
 {
+       BT_DBG("%s orig refcnt %d", d->name,
+              atomic_read(&d->dev.kobj.kref.refcount));
+
        put_device(&d->dev);
 }
 
 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
 {
+       BT_DBG("%s orig refcnt %d", d->name,
+              atomic_read(&d->dev.kobj.kref.refcount));
+
        get_device(&d->dev);
        return d;
 }
@@ -1012,7 +1001,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                          u8 addr_type, u32 flags, u8 *name, u8 name_len,
                          u8 *dev_class);
 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                            u8 link_type, u8 addr_type);
+                            u8 link_type, u8 addr_type, u8 reason);
 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
                           u8 link_type, u8 addr_type, u8 status);
 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -1035,6 +1024,9 @@ int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status);
 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                            u8 link_type, u8 addr_type, u32 passkey,
+                            u8 entered);
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                     u8 addr_type, u8 status);
 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
@@ -1056,7 +1048,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_interleaved_discovery(struct hci_dev *hdev);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-
+bool mgmt_valid_hdev(struct hci_dev *hdev);
 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
 
 /* HCI info for socket */
index a7679f8913d2e9e8b9c14a0807bedd12c607b9cc..7ed8e356425a16dc33c5afd5e4a80eaafdd6ea64 100644 (file)
@@ -433,11 +433,10 @@ struct l2cap_chan {
        struct sock *sk;
 
        struct l2cap_conn       *conn;
+       struct kref     kref;
 
        __u8            state;
 
-       atomic_t        refcnt;
-
        __le16          psm;
        __u16           dcid;
        __u16           scid;
@@ -671,20 +670,8 @@ enum {
        L2CAP_EV_RECV_FRAME,
 };
 
-static inline void l2cap_chan_hold(struct l2cap_chan *c)
-{
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-       atomic_inc(&c->refcnt);
-}
-
-static inline void l2cap_chan_put(struct l2cap_chan *c)
-{
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-       if (atomic_dec_and_test(&c->refcnt))
-               kfree(c);
-}
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
 {
@@ -771,7 +758,6 @@ int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid);
 
 struct l2cap_chan *l2cap_chan_create(void);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
-void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                       bdaddr_t *dst, u8 dst_type);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
index 4348ee8bda6993a15193a4e1e6adb640c9512e1f..22980a7c38730f53305b181e338dc29009cf62d6 100644 (file)
@@ -405,7 +405,16 @@ struct mgmt_ev_device_connected {
        __u8    eir[0];
 } __packed;
 
+#define MGMT_DEV_DISCONN_UNKNOWN       0x00
+#define MGMT_DEV_DISCONN_TIMEOUT       0x01
+#define MGMT_DEV_DISCONN_LOCAL_HOST    0x02
+#define MGMT_DEV_DISCONN_REMOTE                0x03
+
 #define MGMT_EV_DEVICE_DISCONNECTED    0x000C
+struct mgmt_ev_device_disconnected {
+       struct mgmt_addr_info addr;
+       __u8    reason;
+} __packed;
 
 #define MGMT_EV_CONNECT_FAILED         0x000D
 struct mgmt_ev_connect_failed {
@@ -469,3 +478,10 @@ struct mgmt_ev_device_unblocked {
 struct mgmt_ev_device_unpaired {
        struct mgmt_addr_info addr;
 } __packed;
+
+#define MGMT_EV_PASSKEY_NOTIFY         0x0017
+struct mgmt_ev_passkey_notify {
+       struct mgmt_addr_info addr;
+       __le32  passkey;
+       __u8    entered;
+} __packed;
index 8b27927b2a55de3dfd5f94c5a40ef3c3b886eb06..f8ba07f3e5fa19427573128f07764e8733977b29 100644 (file)
@@ -108,8 +108,8 @@ struct smp_cmd_security_req {
 #define SMP_CONFIRM_FAILED             0x04
 #define SMP_PAIRING_NOTSUPP            0x05
 #define SMP_ENC_KEY_SIZE               0x06
-#define SMP_CMD_NOTSUPP                0x07
-#define SMP_UNSPECIFIED                0x08
+#define SMP_CMD_NOTSUPP                        0x07
+#define SMP_UNSPECIFIED                        0x08
 #define SMP_REPEATED_ATTEMPTS          0x09
 
 #define SMP_MIN_ENC_KEY_SIZE           7
@@ -123,8 +123,8 @@ struct smp_chan {
        struct l2cap_conn *conn;
        u8              preq[7]; /* SMP Pairing Request */
        u8              prsp[7]; /* SMP Pairing Response */
-       u8              prnd[16]; /* SMP Pairing Random (local) */
-       u8              rrnd[16]; /* SMP Pairing Random (remote) */
+       u8              prnd[16]; /* SMP Pairing Random (local) */
+       u8              rrnd[16]; /* SMP Pairing Random (remote) */
        u8              pcnf[16]; /* SMP Pairing Confirm */
        u8              tk[16]; /* SMP Temporary Key */
        u8              enc_key_size;
index 3d254e10ff30e7ab3c5a4fee2ee0b38f1309bd94..1b49890822449df661d576219181b10c64d57a6c 100644 (file)
@@ -245,6 +245,7 @@ struct ieee80211_sta_vht_cap {
  *     rates" IE, i.e. CCK rates first, then OFDM.
  * @n_bitrates: Number of bitrates in @bitrates
  * @ht_cap: HT capabilities in this band
+ * @vht_cap: VHT capabilities in this band
  */
 struct ieee80211_supported_band {
        struct ieee80211_channel *channels;
@@ -1439,7 +1440,8 @@ struct cfg80211_gtk_rekey_data {
  * @add_virtual_intf: create a new virtual interface with the given name,
  *     must set the struct wireless_dev's iftype. Beware: You must create
  *     the new netdev in the wiphy's network namespace! Returns the struct
- *     wireless_dev, or an ERR_PTR.
+ *     wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
+ *     also set the address member in the wdev.
  *
  * @del_virtual_intf: remove the virtual interface
  *
@@ -1578,9 +1580,7 @@ struct cfg80211_gtk_rekey_data {
  * @set_cqm_txe_config: Configure connection quality monitor TX error
  *     thresholds.
  * @sched_scan_start: Tell the driver to start a scheduled scan.
- * @sched_scan_stop: Tell the driver to stop an ongoing scheduled
- *     scan.  The driver_initiated flag specifies whether the driver
- *     itself has informed that the scan has stopped.
+ * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan.
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
  *     registered. Note that this callback may not sleep, and cannot run
@@ -1618,6 +1618,9 @@ struct cfg80211_gtk_rekey_data {
  * @get_channel: Get the current operating channel for the virtual interface.
  *     For monitor interfaces, it should return %NULL unless there's a single
  *     current monitoring channel.
+ *
+ * @start_p2p_device: Start the given P2P device.
+ * @stop_p2p_device: Stop the given P2P device.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1625,7 +1628,7 @@ struct cfg80211_ops {
        void    (*set_wakeup)(struct wiphy *wiphy, bool enabled);
 
        struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
-                                                 char *name,
+                                                 const char *name,
                                                  enum nl80211_iftype type,
                                                  u32 *flags,
                                                  struct vif_params *params);
@@ -1834,6 +1837,11 @@ struct cfg80211_ops {
                (*get_channel)(struct wiphy *wiphy,
                               struct wireless_dev *wdev,
                               enum nl80211_channel_type *type);
+
+       int     (*start_p2p_device)(struct wiphy *wiphy,
+                                   struct wireless_dev *wdev);
+       void    (*stop_p2p_device)(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev);
 };
 
 /*
@@ -2397,6 +2405,8 @@ struct cfg80211_cached_keys;
  * @cleanup_work: work struct used for cleanup that can't be done directly
  * @beacon_interval: beacon interval used on this device for transmitting
  *     beacons, 0 when not valid
+ * @address: The address for this device, valid only if @netdev is %NULL
+ * @p2p_started: true if this is a P2P Device that has been started
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -2415,7 +2425,9 @@ struct wireless_dev {
 
        struct work_struct cleanup_work;
 
-       bool use_4addr;
+       bool use_4addr, p2p_started;
+
+       u8 address[ETH_ALEN] __aligned(sizeof(u16));
 
        /* currently used for IBSS and SME - might be rearranged later */
        u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -2445,7 +2457,7 @@ struct wireless_dev {
 
        int beacon_interval;
 
-       u32 ap_unexpected_nlpid;
+       u32 ap_unexpected_nlportid;
 
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
@@ -2463,6 +2475,13 @@ struct wireless_dev {
 #endif
 };
 
+static inline u8 *wdev_address(struct wireless_dev *wdev)
+{
+       if (wdev->netdev)
+               return wdev->netdev->dev_addr;
+       return wdev->address;
+}
+
 /**
  * wdev_priv - return wiphy priv from wireless_dev
  *
@@ -3341,6 +3360,25 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
  */
 void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
 
+/**
+ * cfg80211_conn_failed - connection request failed notification
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @reason: the reason for connection failure
+ * @gfp: allocation flags
+ *
+ * Whenever a station tries to connect to an AP and if the station
+ * could not connect to the AP as the AP has rejected the connection
+ * for some reasons, this function is called.
+ *
+ * The reason for connection failure can be any of the value from
+ * nl80211_connect_failed_reason enum
+ */
+void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
+                         enum nl80211_connect_failed_reason reason,
+                         gfp_t gfp);
+
 /**
  * cfg80211_rx_mgmt - notification of received, unprocessed management frame
  * @wdev: wireless device receiving the frame
@@ -3530,6 +3568,22 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
  */
 u32 cfg80211_calculate_bitrate(struct rate_info *rate);
 
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * Call this function only for wdevs that have no netdev assigned,
+ * e.g. P2P Devices. It removes the device from the list so that
+ * it can no longer be used. It is necessary to call this function
+ * even when cfg80211 requests the removal of the interface by
+ * calling the del_virtual_intf() callback. The function must also
+ * be called when the driver wishes to unregister the wdev, e.g.
+ * when the device is unbound from the driver.
+ *
+ * Requires the RTNL to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index ba55d8b8c87cb5438f87fad5ae01d2aee77d59ab..600d1d705bb86f23b00a8d0feebabe6ea303934d 100644 (file)
@@ -109,6 +109,9 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
 struct sk_buff;
 extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
                                     __be32 from, __be32 to, int pseudohdr);
+extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+                                     const __be32 *from, const __be32 *to,
+                                     int pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                            __be16 from, __be16 to,
index 621e3513ef5ed2c833a16902761b3d5c1ed2ebaf..9a7881066fb316b02fdd7ed52aaf84d038ded1f6 100644 (file)
@@ -396,11 +396,15 @@ static inline void dst_confirm(struct dst_entry *dst)
 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
                                   struct sk_buff *skb)
 {
-       struct hh_cache *hh;
+       const struct hh_cache *hh;
+
+       if (dst->pending_confirm) {
+               unsigned long now = jiffies;
 
-       if (unlikely(dst->pending_confirm)) {
-               n->confirmed = jiffies;
                dst->pending_confirm = 0;
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
        }
 
        hh = &n->hh;
index 48905cd3884c8d687ba3f109e31f5268ee42feb1..bdfbe68c1c3b271bf20b2ef23d5d0325bda2f906 100644 (file)
@@ -65,7 +65,7 @@ struct genl_family {
 /**
  * struct genl_info - receiving information
  * @snd_seq: sending sequence number
- * @snd_pid: netlink pid of sender
+ * @snd_portid: netlink portid of sender
  * @nlhdr: netlink message header
  * @genlhdr: generic netlink message header
  * @userhdr: user specific header
@@ -75,7 +75,7 @@ struct genl_family {
  */
 struct genl_info {
        u32                     snd_seq;
-       u32                     snd_pid;
+       u32                     snd_portid;
        struct nlmsghdr *       nlhdr;
        struct genlmsghdr *     genlhdr;
        void *                  userhdr;
@@ -130,10 +130,10 @@ extern int genl_register_mc_group(struct genl_family *family,
                                  struct genl_multicast_group *grp);
 extern void genl_unregister_mc_group(struct genl_family *family,
                                     struct genl_multicast_group *grp);
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
                        u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
-void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                                struct genl_family *family, int flags, u8 cmd);
 
 /**
@@ -183,7 +183,7 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
                                      struct genl_family *family,
                                      int flags, u8 cmd)
 {
-       return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
+       return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
                           flags, cmd);
 }
 
@@ -212,49 +212,49 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
  * genlmsg_multicast_netns - multicast a netlink message to a specific netns
  * @net: the net namespace
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  */
 static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb,
-                                         u32 pid, unsigned int group, gfp_t flags)
+                                         u32 portid, unsigned int group, gfp_t flags)
 {
-       return nlmsg_multicast(net->genl_sock, skb, pid, group, flags);
+       return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
 }
 
 /**
  * genlmsg_multicast - multicast a netlink message to the default netns
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  */
-static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid,
+static inline int genlmsg_multicast(struct sk_buff *skb, u32 portid,
                                    unsigned int group, gfp_t flags)
 {
-       return genlmsg_multicast_netns(&init_net, skb, pid, group, flags);
+       return genlmsg_multicast_netns(&init_net, skb, portid, group, flags);
 }
 
 /**
  * genlmsg_multicast_allns - multicast a netlink message to all net namespaces
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  *
  * This function must hold the RTNL or rcu_read_lock().
  */
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid,
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid,
                            unsigned int group, gfp_t flags);
 
 /**
  * genlmsg_unicast - unicast a netlink message
  * @skb: netlink message as socket buffer
- * @pid: netlink pid of the destination socket
+ * @portid: netlink portid of the destination socket
  */
-static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
+static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
 {
-       return nlmsg_unicast(net->genl_sock, skb, pid);
+       return nlmsg_unicast(net->genl_sock, skb, portid);
 }
 
 /**
@@ -264,7 +264,7 @@ static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
  */
 static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
 {
-       return genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
 }
 
 /**
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
new file mode 100644 (file)
index 0000000..4fd8a4b
--- /dev/null
@@ -0,0 +1,103 @@
+#ifndef _NET_GRO_CELLS_H
+#define _NET_GRO_CELLS_H
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+struct gro_cell {
+       struct sk_buff_head     napi_skbs;
+       struct napi_struct      napi;
+} ____cacheline_aligned_in_smp;
+
+struct gro_cells {
+       unsigned int            gro_cells_mask;
+       struct gro_cell         *cells;
+};
+
+static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+{
+       unsigned long flags;
+       struct gro_cell *cell = gcells->cells;
+       struct net_device *dev = skb->dev;
+
+       if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+               netif_rx(skb);
+               return;
+       }
+
+       if (skb_rx_queue_recorded(skb))
+               cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+
+       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+               atomic_long_inc(&dev->rx_dropped);
+               kfree_skb(skb);
+               return;
+       }
+
+       spin_lock_irqsave(&cell->napi_skbs.lock, flags);
+
+       __skb_queue_tail(&cell->napi_skbs, skb);
+       if (skb_queue_len(&cell->napi_skbs) == 1)
+               napi_schedule(&cell->napi);
+
+       spin_unlock_irqrestore(&cell->napi_skbs.lock, flags);
+}
+
+static inline int gro_cell_poll(struct napi_struct *napi, int budget)
+{
+       struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
+       struct sk_buff *skb;
+       int work_done = 0;
+
+       while (work_done < budget) {
+               skb = skb_dequeue(&cell->napi_skbs);
+               if (!skb)
+                       break;
+
+               napi_gro_receive(napi, skb);
+               work_done++;
+       }
+
+       if (work_done < budget)
+               napi_complete(napi);
+       return work_done;
+}
+
+static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
+{
+       int i;
+
+       gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
+       gcells->cells = kcalloc(sizeof(struct gro_cell),
+                               gcells->gro_cells_mask + 1,
+                               GFP_KERNEL);
+       if (!gcells->cells)
+               return -ENOMEM;
+
+       for (i = 0; i <= gcells->gro_cells_mask; i++) {
+               struct gro_cell *cell = gcells->cells + i;
+
+               skb_queue_head_init(&cell->napi_skbs);
+               netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
+               napi_enable(&cell->napi);
+       }
+       return 0;
+}
+
+static inline void gro_cells_destroy(struct gro_cells *gcells)
+{
+       struct gro_cell *cell = gcells->cells;
+       int i;
+
+       if (!cell)
+               return;
+       for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
+               netif_napi_del(&cell->napi);
+               skb_queue_purge(&cell->napi_skbs);
+       }
+       kfree(gcells->cells);
+       gcells->cells = NULL;
+}
+
+#endif
index 71392545d0a110906abbe4e51b8cb367cd4fc70f..7f0df133d1197cf7c449d841d9ea81a5f3a07d70 100644 (file)
@@ -183,6 +183,9 @@ struct ieee80211_radiotap_header {
  *     Contains a bitmap of known fields/flags, the flags, and
  *     the MCS index.
  *
+ * IEEE80211_RADIOTAP_AMPDU_STATUS     u32, u16, u8, u8        unitless
+ *
+ *     Contains the AMPDU information for the subframe.
  */
 enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_TSFT = 0,
@@ -205,6 +208,7 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_DATA_RETRIES = 17,
 
        IEEE80211_RADIOTAP_MCS = 19,
+       IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
 
        /* valid in every it_present bitmap, even vendor namespaces */
        IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -270,6 +274,13 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
 
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN                0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN            0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN            0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST               0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR         0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN       0x0020
 
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
index 2fa14691869ca0299d86be8ed87626e297eb808d..aab73757bc4da4c4b5fce8390ec1234a86ea55c0 100644 (file)
@@ -15,6 +15,8 @@ enum {
        INET_ECN_MASK = 3,
 };
 
+extern int sysctl_tunnel_ecn_log;
+
 static inline int INET_ECN_is_ce(__u8 dsfield)
 {
        return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
@@ -145,4 +147,78 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
        return 0;
 }
 
+/*
+ * RFC 6080 4.2
+ *  To decapsulate the inner header at the tunnel egress, a compliant
+ *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
+ *  intersection of the appropriate arriving inner header (row) and outer
+ *  header (column) in Figure 4
+ *
+ *      +---------+------------------------------------------------+
+ *      |Arriving |            Arriving Outer Header               |
+ *      |   Inner +---------+------------+------------+------------+
+ *      |  Header | Not-ECT | ECT(0)     | ECT(1)     |     CE     |
+ *      +---------+---------+------------+------------+------------+
+ *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
+ *      |  ECT(0) |  ECT(0) | ECT(0)     | ECT(1)     |     CE     |
+ *      |  ECT(1) |  ECT(1) | ECT(1) (!) | ECT(1)     |     CE     |
+ *      |    CE   |      CE |     CE     |     CE(!!!)|     CE     |
+ *      +---------+---------+------------+------------+------------+
+ *
+ *             Figure 4: New IP in IP Decapsulation Behaviour
+ *
+ *  returns 0 on success
+ *          1 if something is broken and should be logged (!!! above)
+ *          2 if packet should be dropped
+ */
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+                                      __u8 outer, __u8 inner)
+{
+       if (INET_ECN_is_not_ect(inner)) {
+               switch (outer & INET_ECN_MASK) {
+               case INET_ECN_NOT_ECT:
+                       return 0;
+               case INET_ECN_ECT_0:
+               case INET_ECN_ECT_1:
+                       return 1;
+               case INET_ECN_CE:
+                       return 2;
+               }
+       }
+
+       if (INET_ECN_is_ce(outer))
+               INET_ECN_set_ce(skb);
+
+       return 0;
+}
+
+static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
+                                    struct sk_buff *skb)
+{
+       __u8 inner;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               inner = ip_hdr(skb)->tos;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner = ipv6_get_dsfield(ipv6_hdr(skb));
+       else
+               return 0;
+
+       return INET_ECN_decapsulate(skb, oiph->tos, inner);
+}
+
+static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
+                                     struct sk_buff *skb)
+{
+       __u8 inner;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               inner = ip_hdr(skb)->tos;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner = ipv6_get_dsfield(ipv6_hdr(skb));
+       else
+               return 0;
+
+       return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
+}
 #endif
index 2431cf83aecafb74ca1e9464bec6f6a669046d75..32786a0447187f44e1264f4dc4c0b4721ebc3e08 100644 (file)
@@ -29,6 +29,8 @@ struct inet_frag_queue {
 #define INET_FRAG_COMPLETE     4
 #define INET_FRAG_FIRST_IN     2
 #define INET_FRAG_LAST_IN      1
+
+       u16                     max_size;
 };
 
 #define INETFRAGS_HASHSZ               64
@@ -59,7 +61,7 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
 void inet_frag_destroy(struct inet_frag_queue *q,
                                struct inet_frags *f, int *work);
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                struct inet_frags *f, void *key, unsigned int hash)
        __releases(&f->lock);
index 613cfa4016728300da88a8fc6f1f664de99da5cc..256c1ed2d69afc06cfbc8b51550c7b374a166afe 100644 (file)
@@ -101,10 +101,8 @@ struct inet_cork {
        __be32                  addr;
        struct ip_options       *opt;
        unsigned int            fragsize;
-       struct dst_entry        *dst;
        int                     length; /* Total length of all frames */
-       struct page             *page;
-       u32                     off;
+       struct dst_entry        *dst;
        u8                      tx_flags;
 };
 
index 5a5d84d3d2c6b6e3777035a631fb10e7479ab8de..0707fb9551aa4c1011c88969a42cd4482450d035 100644 (file)
@@ -42,6 +42,8 @@ struct inet_skb_parm {
 #define IPSKB_XFRM_TRANSFORMED 4
 #define IPSKB_FRAG_COMPLETE    8
 #define IPSKB_REROUTED         16
+
+       u16                     frag_max_size;
 };
 
 static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
index 9fc7114159e885031550abedebf67c74f08bde76..8a2a203eb15d087c4838e80aa52e55208cbee1d5 100644 (file)
@@ -37,6 +37,7 @@ struct fib6_config {
        int             fc_ifindex;
        u32             fc_flags;
        u32             fc_protocol;
+       u32             fc_type;        /* only 8 bits are used */
 
        struct in6_addr fc_dst;
        struct in6_addr fc_src;
index 358fb86f57eb952816bc76736b7b85644dd77184..e03047f7090bb3419c2aa6f8b39f3a7ff0494f8a 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/netdevice.h>
 #include <linux/ip6_tunnel.h>
 
+#define IP6TUNNEL_ERR_TIMEO (30*HZ)
+
 /* capable of sending packets */
 #define IP6_TNL_F_CAP_XMIT 0x10000
 /* capable of receiving packets */
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
-/* IPv6 tunnel */
+struct __ip6_tnl_parm {
+       char name[IFNAMSIZ];    /* name of tunnel device */
+       int link;               /* ifindex of underlying L2 interface */
+       __u8 proto;             /* tunnel protocol */
+       __u8 encap_limit;       /* encapsulation limit for tunnel */
+       __u8 hop_limit;         /* hop limit for tunnel */
+       __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
+       __u32 flags;            /* tunnel flags */
+       struct in6_addr laddr;  /* local tunnel end-point address */
+       struct in6_addr raddr;  /* remote tunnel end-point address */
+
+       __be16                  i_flags;
+       __be16                  o_flags;
+       __be32                  i_key;
+       __be32                  o_key;
+};
 
+/* IPv6 tunnel */
 struct ip6_tnl {
        struct ip6_tnl __rcu *next;     /* next tunnel in list */
        struct net_device *dev; /* virtual device associated with tunnel */
-       struct ip6_tnl_parm parms;      /* tunnel configuration parameters */
+       struct __ip6_tnl_parm parms;    /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
        struct dst_entry *dst_cache;    /* cached dst */
        u32 dst_cookie;
+
+       int err_count;
+       unsigned long err_time;
+
+       /* These fields used only by GRE */
+       __u32 i_seqno;  /* The last seen seqno  */
+       __u32 o_seqno;  /* The last output seqno */
+       int hlen;       /* Precalculated GRE header length */
+       int mlink;
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -31,4 +58,14 @@ struct ipv6_tlv_tnl_enc_lim {
        __u8 encap_limit;       /* tunnel encapsulation limit   */
 } __packed;
 
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+void ip6_tnl_dst_reset(struct ip6_tnl *t);
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+               const struct in6_addr *raddr);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
+                            const struct in6_addr *raddr);
+
 #endif
index 95374d1696a163a75f8aa006bf99fe958df195aa..ee75ccdf5188cbf4eac840714a22b456d6700ad6 100644 (file)
@@ -808,8 +808,6 @@ struct netns_ipvs {
        struct list_head        rs_table[IP_VS_RTAB_SIZE];
        /* ip_vs_app */
        struct list_head        app_list;
-       /* ip_vs_ftp */
-       struct ip_vs_app        *ftp_app;
        /* ip_vs_proto */
        #define IP_VS_PROTO_TAB_SIZE    32      /* must be power of 2 */
        struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
@@ -890,6 +888,7 @@ struct netns_ipvs {
        unsigned int            sysctl_sync_refresh_period;
        int                     sysctl_sync_retries;
        int                     sysctl_nat_icmp_send;
+       int                     sysctl_pmtu_disc;
 
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
@@ -976,6 +975,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
        return ipvs->sysctl_sync_sock_size;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_pmtu_disc;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1018,6 +1022,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
        return 0;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+       return 1;
+}
+
 #endif
 
 /*
@@ -1179,7 +1188,8 @@ extern void ip_vs_service_net_cleanup(struct net *net);
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern struct ip_vs_app *register_ip_vs_app(struct net *net,
+                                           struct ip_vs_app *app);
 extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
index a93cf6d7e94b160dc2ad1ec061948c05713264ab..ddc077c51f324d74e248fada33fd152238cf5337 100644 (file)
@@ -2,6 +2,7 @@
 #define __NET_IPIP_H 1
 
 #include <linux/if_tunnel.h>
+#include <net/gro_cells.h>
 #include <net/ip.h>
 
 /* Keep error state on tunnel for 30 sec */
@@ -36,6 +37,8 @@ struct ip_tunnel {
 #endif
        struct ip_tunnel_prl_entry __rcu *prl;          /* potential router list */
        unsigned int                    prl_count;      /* # of entries in PRL */
+
+       struct gro_cells                gro_cells;
 };
 
 struct ip_tunnel_prl_entry {
index c8a202436e01844ad9ffdcd2746365fbf0489bdd..979bf6c131412be9a4662d4738056feb91a26272 100644 (file)
@@ -34,6 +34,7 @@
 #define NEXTHDR_IPV6           41      /* IPv6 in IPv6 */
 #define NEXTHDR_ROUTING                43      /* Routing header. */
 #define NEXTHDR_FRAGMENT       44      /* Fragmentation/reassembly header. */
+#define NEXTHDR_GRE            47      /* GRE header. */
 #define NEXTHDR_ESP            50      /* Encapsulating security payload. */
 #define NEXTHDR_AUTH           51      /* Authentication header. */
 #define NEXTHDR_ICMP           58      /* ICMP for IPv6. */
@@ -270,8 +271,17 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
 
 extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
 
-int ip6_frag_nqueues(struct net *net);
-int ip6_frag_mem(struct net *net);
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ip6_frag_nqueues(struct net *net)
+{
+       return net->ipv6.frags.nqueues;
+}
+
+static inline int ip6_frag_mem(struct net *net)
+{
+       return atomic_read(&net->ipv6.frags.mem);
+}
+#endif
 
 #define IPV6_FRAG_HIGH_THRESH  (256 * 1024)    /* 262144 */
 #define IPV6_FRAG_LOW_THRESH   (192 * 1024)    /* 196608 */
@@ -410,6 +420,25 @@ struct ip6_create_arg {
 void ip6_frag_init(struct inet_frag_queue *q, void *a);
 bool ip6_frag_match(struct inet_frag_queue *q, void *a);
 
+/*
+ *     Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+       struct inet_frag_queue  q;
+
+       __be32                  id;             /* fragment id          */
+       u32                     user;
+       struct in6_addr         saddr;
+       struct in6_addr         daddr;
+
+       int                     iif;
+       unsigned int            csum;
+       __u16                   nhoffset;
+};
+
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+                          struct inet_frags *frags);
+
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
index f2d0fc570527baf216f34cd364c0162330dcbd28..9e7d7f08ef77c5539ea887b483b274fc0ddb91fd 100644 (file)
@@ -151,7 +151,6 @@ extern int sysctl_llc2_ack_timeout;
 extern int sysctl_llc2_busy_timeout;
 extern int sysctl_llc2_p_timeout;
 extern int sysctl_llc2_rej_timeout;
-extern int sysctl_llc_station_ack_timeout;
 #else
 #define llc_sysctl_init() (0)
 #define llc_sysctl_exit() do { } while(0)
index bb86aa6f98dd065d701d37d6ba4a18b2d55f00ae..82558c8decf86e7cf720d6b7a190455c31e60be3 100644 (file)
@@ -171,6 +171,7 @@ struct ieee80211_low_level_stats {
  * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
  * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
+ * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -190,6 +191,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_IDLE                = 1<<14,
        BSS_CHANGED_SSID                = 1<<15,
        BSS_CHANGED_AP_PROBE_RESP       = 1<<16,
+       BSS_CHANGED_PS                  = 1<<17,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -266,6 +268,8 @@ enum ieee80211_rssi_event {
  * @idle: This interface is idle. There's also a global idle flag in the
  *     hardware config which may be more appropriate depending on what
  *     your driver/device needs to do.
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ *     offchannel/dynamic_ps operations.
  * @ssid: The SSID of the current vif. Only valid in AP-mode.
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
@@ -296,6 +300,7 @@ struct ieee80211_bss_conf {
        bool arp_filter_enabled;
        bool qos;
        bool idle;
+       bool ps;
        u8 ssid[IEEE80211_MAX_SSID_LEN];
        size_t ssid_len;
        bool hidden_ssid;
@@ -522,9 +527,6 @@ struct ieee80211_tx_rate {
  *  (2) driver internal use (if applicable)
  *  (3) TX status information - driver tells mac80211 what happened
  *
- * The TX control's sta pointer is only valid during the ->tx call,
- * it may be NULL.
- *
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
  * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
@@ -555,6 +557,7 @@ struct ieee80211_tx_info {
                                        struct ieee80211_tx_rate rates[
                                                IEEE80211_TX_MAX_RATES];
                                        s8 rts_cts_rate_idx;
+                                       /* 3 bytes free */
                                };
                                /* only needed before rate control */
                                unsigned long jiffies;
@@ -562,7 +565,7 @@ struct ieee80211_tx_info {
                        /* NB: vif can be NULL for injected frames */
                        struct ieee80211_vif *vif;
                        struct ieee80211_key_conf *hw_key;
-                       struct ieee80211_sta *sta;
+                       /* 8 bytes free */
                } control;
                struct {
                        struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -673,21 +676,41 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
  *     the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
  *     to hw.radiotap_mcs_details to advertise that fact
+ * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
+ *     number (@ampdu_reference) must be populated and be a distinct number for
+ *     each A-MPDU
+ * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
+ * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
+ *     monitoring purposes only
+ * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
+ *     subframes of a single A-MPDU
+ * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
+ * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
+ *     on this subframe
+ * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
+ *     is stored in the @ampdu_delimiter_crc field)
  */
 enum mac80211_rx_flags {
-       RX_FLAG_MMIC_ERROR      = 1<<0,
-       RX_FLAG_DECRYPTED       = 1<<1,
-       RX_FLAG_MMIC_STRIPPED   = 1<<3,
-       RX_FLAG_IV_STRIPPED     = 1<<4,
-       RX_FLAG_FAILED_FCS_CRC  = 1<<5,
-       RX_FLAG_FAILED_PLCP_CRC = 1<<6,
-       RX_FLAG_MACTIME_MPDU    = 1<<7,
-       RX_FLAG_SHORTPRE        = 1<<8,
-       RX_FLAG_HT              = 1<<9,
-       RX_FLAG_40MHZ           = 1<<10,
-       RX_FLAG_SHORT_GI        = 1<<11,
-       RX_FLAG_NO_SIGNAL_VAL   = 1<<12,
-       RX_FLAG_HT_GF           = 1<<13,
+       RX_FLAG_MMIC_ERROR              = BIT(0),
+       RX_FLAG_DECRYPTED               = BIT(1),
+       RX_FLAG_MMIC_STRIPPED           = BIT(3),
+       RX_FLAG_IV_STRIPPED             = BIT(4),
+       RX_FLAG_FAILED_FCS_CRC          = BIT(5),
+       RX_FLAG_FAILED_PLCP_CRC         = BIT(6),
+       RX_FLAG_MACTIME_MPDU            = BIT(7),
+       RX_FLAG_SHORTPRE                = BIT(8),
+       RX_FLAG_HT                      = BIT(9),
+       RX_FLAG_40MHZ                   = BIT(10),
+       RX_FLAG_SHORT_GI                = BIT(11),
+       RX_FLAG_NO_SIGNAL_VAL           = BIT(12),
+       RX_FLAG_HT_GF                   = BIT(13),
+       RX_FLAG_AMPDU_DETAILS           = BIT(14),
+       RX_FLAG_AMPDU_REPORT_ZEROLEN    = BIT(15),
+       RX_FLAG_AMPDU_IS_ZEROLEN        = BIT(16),
+       RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
+       RX_FLAG_AMPDU_IS_LAST           = BIT(18),
+       RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
+       RX_FLAG_AMPDU_DELIM_CRC_KNOWN   = BIT(20),
 };
 
 /**
@@ -711,17 +734,22 @@ enum mac80211_rx_flags {
  *     HT rates are use (RX_FLAG_HT)
  * @flag: %RX_FLAG_*
  * @rx_flags: internal RX flags for mac80211
+ * @ampdu_reference: A-MPDU reference number, must be a different value for
+ *     each A-MPDU but the same for each subframe within one A-MPDU
+ * @ampdu_delimiter_crc: A-MPDU delimiter CRC
  */
 struct ieee80211_rx_status {
        u64 mactime;
        u32 device_timestamp;
-       u16 flag;
+       u32 ampdu_reference;
+       u32 flag;
        u16 freq;
        u8 rate_idx;
        u8 rx_flags;
        u8 band;
        u8 antenna;
        s8 signal;
+       u8 ampdu_delimiter_crc;
 };
 
 /**
@@ -945,21 +973,29 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
  *     generation in software.
  * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
  *     that the key is pairwise rather then a shared key.
- * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
+ * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
  *     CCMP key if it requires CCMP encryption of management frames (MFP) to
  *     be done in software.
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *     if space should be prepared for the IV, but the IV
  *     itself should not be generated. Do not set together with
  *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
+ *     management frames. The flag can help drivers that have a hardware
+ *     crypto implementation that doesn't deal with management frames
+ *     properly by allowing them to not upload the keys to hardware and
+ *     fall back to software crypto. Note that this flag deals only with
+ *     RX, if your crypto engine can't deal with TX you can also set the
+ *     %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
  */
 enum ieee80211_key_flags {
        IEEE80211_KEY_FLAG_WMM_STA      = 1<<0,
        IEEE80211_KEY_FLAG_GENERATE_IV  = 1<<1,
        IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
        IEEE80211_KEY_FLAG_PAIRWISE     = 1<<3,
-       IEEE80211_KEY_FLAG_SW_MGMT      = 1<<4,
+       IEEE80211_KEY_FLAG_SW_MGMT_TX   = 1<<4,
        IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
+       IEEE80211_KEY_FLAG_RX_MGMT      = 1<<6,
 };
 
 /**
@@ -1073,6 +1109,16 @@ enum sta_notify_cmd {
        STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE,
 };
 
+/**
+ * struct ieee80211_tx_control - TX control data
+ *
+ * @sta: station table entry, this sta pointer may be NULL and
+ *     it is not allowed to copy the pointer, due to RCU.
+ */
+struct ieee80211_tx_control {
+       struct ieee80211_sta *sta;
+};
+
 /**
  * enum ieee80211_hw_flags - hardware flags
  *
@@ -1203,6 +1249,10 @@ enum sta_notify_cmd {
  *     queue mapping in order to use different queues (not just one per AC)
  *     for different virtual interfaces. See the doc section on HW queue
  *     control for more details.
+ *
+ * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
+ *     P2P Interface. This will be honoured even if more than one interface
+ *     is supported.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1230,6 +1280,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_AP_LINK_PS                         = 1<<22,
        IEEE80211_HW_TX_AMPDU_SETUP_IN_HW               = 1<<23,
        IEEE80211_HW_SCAN_WHILE_IDLE                    = 1<<24,
+       IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF              = 1<<25,
 };
 
 /**
@@ -1884,10 +1935,14 @@ enum ieee80211_frame_release_type {
  * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
  *     to this station changed.
  * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
+ *     changed (in IBSS mode) due to discovering more information about
+ *     the peer.
  */
 enum ieee80211_rate_control_changed {
        IEEE80211_RC_BW_CHANGED         = BIT(0),
        IEEE80211_RC_SMPS_CHANGED       = BIT(1),
+       IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
 };
 
 /**
@@ -2264,7 +2319,9 @@ enum ieee80211_rate_control_changed {
  *     The callback is optional and can (should!) sleep.
  */
 struct ieee80211_ops {
-       void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       void (*tx)(struct ieee80211_hw *hw,
+                  struct ieee80211_tx_control *control,
+                  struct sk_buff *skb);
        int (*start)(struct ieee80211_hw *hw);
        void (*stop)(struct ieee80211_hw *hw);
 #ifdef CONFIG_PM
index 96a3b5c03e37d965b51e9d9754af3969df6146fc..980d263765cf41059ede684d54abafb6a9e66c6c 100644 (file)
@@ -49,6 +49,7 @@ enum {
 #include <linux/types.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+#include <linux/hash.h>
 
 #include <net/neighbour.h>
 
@@ -134,7 +135,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
 {
        const u32 *p32 = pkey;
 
-       return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+       return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
                (p32[1] * hash_rnd[1]) +
                (p32[2] * hash_rnd[2]) +
                (p32[3] * hash_rnd[3]));
index 344d8988842a527fbec3bffc674cda865f67b15c..0dab173e27da6e8e66a8eea4913f5b0230f612bd 100644 (file)
@@ -334,18 +334,22 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 }
 #endif
 
-static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
        unsigned int seq;
        int hh_len;
 
        do {
-               int hh_alen;
-
                seq = read_seqbegin(&hh->hh_lock);
                hh_len = hh->hh_len;
-               hh_alen = HH_DATA_ALIGN(hh_len);
-               memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+               if (likely(hh_len <= HH_DATA_MOD)) {
+                       /* this is inlined by gcc */
+                       memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+               } else {
+                       int hh_alen = HH_DATA_ALIGN(hh_len);
+
+                       memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+               }
        } while (read_seqretry(&hh->hh_lock, seq));
 
        skb_push(skb, hh_len);
index fd87963a0ea5878cecae892541b9fc9f79e64814..4faf6612ecacc05d2b00877691e440338ebb0144 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/netns/packet.h>
 #include <net/netns/ipv4.h>
 #include <net/netns/ipv6.h>
+#include <net/netns/sctp.h>
 #include <net/netns/dccp.h>
 #include <net/netns/x_tables.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -66,6 +67,7 @@ struct net {
        struct hlist_head       *dev_name_head;
        struct hlist_head       *dev_index_head;
        unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
+       int                     ifindex;
 
        /* core fib_rules */
        struct list_head        rules_ops;
@@ -80,6 +82,9 @@ struct net {
 #if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
+#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+       struct netns_sctp       sctp;
+#endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
        struct netns_dccp       dccp;
 #endif
@@ -87,6 +92,9 @@ struct net {
        struct netns_xt         xt;
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct netns_ct         ct;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+       struct netns_nf_frag    nf_frag;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
@@ -105,6 +113,13 @@ struct net {
        atomic_t                rt_genid;
 };
 
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+
+#define LOOPBACK_IFINDEX       1
 
 #include <linux/seq_file_net.h>
 
index 4a045cda9c60c75a96b956b051dfbf5a6d6581b7..5654d292efd4f0883f6051610f6144552f20cb61 100644 (file)
@@ -17,7 +17,7 @@ struct nf_conntrack_ecache {
        unsigned long missed;   /* missed events */
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
-       u32 pid;                /* netlink pid of destroyer */
+       u32 portid;             /* netlink portid of destroyer */
        struct timer_list timeout;
 };
 
@@ -60,7 +60,7 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
 /* This structure is passed to event handler */
 struct nf_ct_event {
        struct nf_conn *ct;
-       u32 pid;
+       u32 portid;
        int report;
 };
 
@@ -92,7 +92,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 static inline int
 nf_conntrack_eventmask_report(unsigned int eventmask,
                              struct nf_conn *ct,
-                             u32 pid,
+                             u32 portid,
                              int report)
 {
        int ret = 0;
@@ -112,11 +112,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
        if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
                struct nf_ct_event item = {
                        .ct     = ct,
-                       .pid    = e->pid ? e->pid : pid,
+                       .portid = e->portid ? e->portid : portid,
                        .report = report
                };
                /* This is a resent of a destroy event? If so, skip missed */
-               unsigned long missed = e->pid ? 0 : e->missed;
+               unsigned long missed = e->portid ? 0 : e->missed;
 
                if (!((eventmask | missed) & e->ctmask))
                        goto out_unlock;
@@ -126,11 +126,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
                        spin_lock_bh(&ct->lock);
                        if (ret < 0) {
                                /* This is a destroy event that has been
-                                * triggered by a process, we store the PID
+                                * triggered by a process, we store the PORTID
                                 * to include it in the retransmission. */
                                if (eventmask & (1 << IPCT_DESTROY) &&
-                                   e->pid == 0 && pid != 0)
-                                       e->pid = pid;
+                                   e->portid == 0 && portid != 0)
+                                       e->portid = portid;
                                else
                                        e->missed |= eventmask;
                        } else
@@ -145,9 +145,9 @@ out_unlock:
 
 static inline int
 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
-                         u32 pid, int report)
+                         u32 portid, int report)
 {
-       return nf_conntrack_eventmask_report(1 << event, ct, pid, report);
+       return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 }
 
 static inline int
@@ -158,7 +158,7 @@ nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 
 struct nf_exp_event {
        struct nf_conntrack_expect *exp;
-       u32 pid;
+       u32 portid;
        int report;
 };
 
@@ -172,7 +172,7 @@ extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_even
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
                          struct nf_conntrack_expect *exp,
-                         u32 pid,
+                         u32 portid,
                          int report)
 {
        struct net *net = nf_ct_exp_net(exp);
@@ -191,7 +191,7 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
        if (e->expmask & (1 << event)) {
                struct nf_exp_event item = {
                        .exp    = exp,
-                       .pid    = pid,
+                       .portid = portid,
                        .report = report
                };
                notify->fcn(1 << event, &item);
@@ -216,20 +216,20 @@ static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
                                            struct nf_conn *ct) {}
 static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
                                                struct nf_conn *ct,
-                                               u32 pid,
+                                               u32 portid,
                                                int report) { return 0; }
 static inline int nf_conntrack_event(enum ip_conntrack_events event,
                                     struct nf_conn *ct) { return 0; }
 static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
                                            struct nf_conn *ct,
-                                           u32 pid,
+                                           u32 portid,
                                            int report) { return 0; }
 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
 static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
                                      struct nf_conntrack_expect *exp) {}
 static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
                                             struct nf_conntrack_expect *exp,
-                                            u32 pid,
+                                            u32 portid,
                                             int report) {}
 
 static inline int nf_conntrack_ecache_init(struct net *net)
index 983f00263243c66407ad4a1281167d9888378bd1..cc13f377a705c36c62987060d3cbe5e63eac8147 100644 (file)
@@ -43,7 +43,7 @@ struct nf_conntrack_expect {
        unsigned int class;
 
 #ifdef CONFIG_NF_NAT_NEEDED
-       __be32 saved_ip;
+       union nf_inet_addr saved_addr;
        /* This is the original per-proto part, used to map the
         * expected connection the way the recipient expects. */
        union nf_conntrack_man_proto saved_proto;
index 34ec89f8dbf90303246af81061e8746bad99e867..e41e472d08f2123a7a0917e598196ec4ebf8e5d3 100644 (file)
@@ -55,6 +55,26 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
 #endif
 };
 
+static inline unsigned int *
+nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
+                    struct nf_conntrack_l4proto *l4proto)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+       struct nf_conn_timeout *timeout_ext;
+       unsigned int *timeouts;
+
+       timeout_ext = nf_ct_timeout_find(ct);
+       if (timeout_ext)
+               timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
+       else
+               timeouts = l4proto->get_timeouts(net);
+
+       return timeouts;
+#else
+       return l4proto->get_timeouts(net);
+#endif
+}
+
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 extern int nf_conntrack_timeout_init(struct net *net);
 extern void nf_conntrack_timeout_fini(struct net *net);
index b4de990b55f123e7f2c9ad1095634743e2ad4698..bd8eea720f2ed0c3e0b61de6b03cb36035ff61cf 100644 (file)
@@ -43,14 +43,16 @@ struct nf_conn_nat {
        struct nf_conn *ct;
        union nf_conntrack_nat_help help;
 #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
-    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
+    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
+    defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
+    defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
        int masq_index;
 #endif
 };
 
 /* Set up the info structure to map into this range. */
 extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-                                     const struct nf_nat_ipv4_range *range,
+                                     const struct nf_nat_range *range,
                                      enum nf_nat_manip_type maniptype);
 
 /* Is this tuple already taken? (not by us)*/
index b13d8d18d595b320d454b4d6b91e05d7aeea41b2..972e1e47ec79819f610cf9081be732b9131aa46a 100644 (file)
@@ -12,10 +12,7 @@ extern unsigned int nf_nat_packet(struct nf_conn *ct,
                                  unsigned int hooknum,
                                  struct sk_buff *skb);
 
-extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
-                                        enum ip_conntrack_info ctinfo,
-                                        unsigned int hooknum,
-                                        struct sk_buff *skb);
+extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
 
 static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
index 7d8fb7b46c442552f1d4375f3465d6dded328a4e..b4d6bfc2af034a32c1a5f9b847c91b99039cd868 100644 (file)
@@ -10,6 +10,7 @@ struct sk_buff;
 extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                      struct nf_conn *ct,
                                      enum ip_conntrack_info ctinfo,
+                                     unsigned int protoff,
                                      unsigned int match_offset,
                                      unsigned int match_len,
                                      const char *rep_buffer,
@@ -18,12 +19,13 @@ extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                           struct nf_conn *ct,
                                           enum ip_conntrack_info ctinfo,
+                                          unsigned int protoff,
                                           unsigned int match_offset,
                                           unsigned int match_len,
                                           const char *rep_buffer,
                                           unsigned int rep_len)
 {
-       return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+       return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                          match_offset, match_len,
                                          rep_buffer, rep_len, true);
 }
@@ -31,6 +33,7 @@ static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
                                    struct nf_conn *ct,
                                    enum ip_conntrack_info ctinfo,
+                                   unsigned int protoff,
                                    unsigned int match_offset,
                                    unsigned int match_len,
                                    const char *rep_buffer,
@@ -41,10 +44,12 @@ extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
                                  __be32 seq, s16 off);
 extern int nf_nat_seq_adjust(struct sk_buff *skb,
                             struct nf_conn *ct,
-                            enum ip_conntrack_info ctinfo);
+                            enum ip_conntrack_info ctinfo,
+                            unsigned int protoff);
 extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
                                     struct nf_conn *ct,
-                                    enum ip_conntrack_info ctinfo);
+                                    enum ip_conntrack_info ctinfo,
+                                    unsigned int protoff);
 
 /* Setup NAT on this expected conntrack so it follows master, but goes
  * to port ct->master->saved_proto. */
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
new file mode 100644 (file)
index 0000000..bd3b97e
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef _NF_NAT_L3PROTO_H
+#define _NF_NAT_L3PROTO_H
+
+struct nf_nat_l4proto;
+struct nf_nat_l3proto {
+       u8      l3proto;
+
+       bool    (*in_range)(const struct nf_conntrack_tuple *t,
+                           const struct nf_nat_range *range);
+
+       u32     (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
+
+       bool    (*manip_pkt)(struct sk_buff *skb,
+                            unsigned int iphdroff,
+                            const struct nf_nat_l4proto *l4proto,
+                            const struct nf_conntrack_tuple *target,
+                            enum nf_nat_manip_type maniptype);
+
+       void    (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
+                              __sum16 *check,
+                              const struct nf_conntrack_tuple *t,
+                              enum nf_nat_manip_type maniptype);
+
+       void    (*csum_recalc)(struct sk_buff *skb, u8 proto,
+                              void *data, __sum16 *check,
+                              int datalen, int oldlen);
+
+       void    (*decode_session)(struct sk_buff *skb,
+                                 const struct nf_conn *ct,
+                                 enum ip_conntrack_dir dir,
+                                 unsigned long statusbit,
+                                 struct flowi *fl);
+
+       int     (*nlattr_to_range)(struct nlattr *tb[],
+                                  struct nf_nat_range *range);
+};
+
+extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+
+extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
+                                        struct nf_conn *ct,
+                                        enum ip_conntrack_info ctinfo,
+                                        unsigned int hooknum);
+extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
+                                          struct nf_conn *ct,
+                                          enum ip_conntrack_info ctinfo,
+                                          unsigned int hooknum,
+                                          unsigned int hdrlen);
+
+#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
new file mode 100644 (file)
index 0000000..24feb68
--- /dev/null
@@ -0,0 +1,72 @@
+/* Header for use in defining a given protocol. */
+#ifndef _NF_NAT_L4PROTO_H
+#define _NF_NAT_L4PROTO_H
+#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+struct nf_nat_range;
+struct nf_nat_l3proto;
+
+struct nf_nat_l4proto {
+       /* Protocol number. */
+       u8 l4proto;
+
+       /* Translate a packet to the target according to manip type.
+        * Return true if succeeded.
+        */
+       bool (*manip_pkt)(struct sk_buff *skb,
+                         const struct nf_nat_l3proto *l3proto,
+                         unsigned int iphdroff, unsigned int hdroff,
+                         const struct nf_conntrack_tuple *tuple,
+                         enum nf_nat_manip_type maniptype);
+
+       /* Is the manipable part of the tuple between min and max incl? */
+       bool (*in_range)(const struct nf_conntrack_tuple *tuple,
+                        enum nf_nat_manip_type maniptype,
+                        const union nf_conntrack_man_proto *min,
+                        const union nf_conntrack_man_proto *max);
+
+       /* Alter the per-proto part of the tuple (depending on
+        * maniptype), to give a unique tuple in the given range if
+        * possible.  Per-protocol part of tuple is initialized to the
+        * incoming packet.
+        */
+       void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
+                            struct nf_conntrack_tuple *tuple,
+                            const struct nf_nat_range *range,
+                            enum nf_nat_manip_type maniptype,
+                            const struct nf_conn *ct);
+
+       int (*nlattr_to_range)(struct nlattr *tb[],
+                              struct nf_nat_range *range);
+};
+
+/* Protocol registration. */
+extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+
+extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+
+/* Built-in protocols. */
+extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
+extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
+
+extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+                                   enum nf_nat_manip_type maniptype,
+                                   const union nf_conntrack_man_proto *min,
+                                   const union nf_conntrack_man_proto *max);
+
+extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                       struct nf_conntrack_tuple *tuple,
+                                       const struct nf_nat_range *range,
+                                       enum nf_nat_manip_type maniptype,
+                                       const struct nf_conn *ct,
+                                       u16 *rover);
+
+extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+                                         struct nf_nat_range *range);
+
+#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
deleted file mode 100644 (file)
index 7b0b511..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Header for use in defining a given protocol. */
-#ifndef _NF_NAT_PROTOCOL_H
-#define _NF_NAT_PROTOCOL_H
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-struct nf_nat_ipv4_range;
-
-struct nf_nat_protocol {
-       /* Protocol number. */
-       unsigned int protonum;
-
-       /* Translate a packet to the target according to manip type.
-          Return true if succeeded. */
-       bool (*manip_pkt)(struct sk_buff *skb,
-                         unsigned int iphdroff,
-                         const struct nf_conntrack_tuple *tuple,
-                         enum nf_nat_manip_type maniptype);
-
-       /* Is the manipable part of the tuple between min and max incl? */
-       bool (*in_range)(const struct nf_conntrack_tuple *tuple,
-                        enum nf_nat_manip_type maniptype,
-                        const union nf_conntrack_man_proto *min,
-                        const union nf_conntrack_man_proto *max);
-
-       /* Alter the per-proto part of the tuple (depending on
-          maniptype), to give a unique tuple in the given range if
-          possible.  Per-protocol part of tuple is initialized to the
-          incoming packet. */
-       void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
-                            const struct nf_nat_ipv4_range *range,
-                            enum nf_nat_manip_type maniptype,
-                            const struct nf_conn *ct);
-
-       int (*nlattr_to_range)(struct nlattr *tb[],
-                              struct nf_nat_ipv4_range *range);
-};
-
-/* Protocol registration. */
-extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto);
-extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto);
-
-/* Built-in protocols. */
-extern const struct nf_nat_protocol nf_nat_protocol_tcp;
-extern const struct nf_nat_protocol nf_nat_protocol_udp;
-extern const struct nf_nat_protocol nf_nat_protocol_icmp;
-extern const struct nf_nat_protocol nf_nat_unknown_protocol;
-
-extern int init_protocols(void) __init;
-extern void cleanup_protocols(void);
-extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);
-
-extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
-                                 enum nf_nat_manip_type maniptype,
-                                 const union nf_conntrack_man_proto *min,
-                                 const union nf_conntrack_man_proto *max);
-
-extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-                                     const struct nf_nat_ipv4_range *range,
-                                     enum nf_nat_manip_type maniptype,
-                                     const struct nf_conn *ct,
-                                     u_int16_t *rover);
-
-extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-                                       struct nf_nat_ipv4_range *range);
-
-#endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h
deleted file mode 100644 (file)
index 2890bdc..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _NF_NAT_RULE_H
-#define _NF_NAT_RULE_H
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-extern int nf_nat_rule_init(void) __init;
-extern void nf_nat_rule_cleanup(void);
-extern int nf_nat_rule_find(struct sk_buff *skb,
-                           unsigned int hooknum,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           struct nf_conn *ct);
-
-#endif /* _NF_NAT_RULE_H */
index 785f37a3b44ee80e1336d7301bd54c6ff48b7ba7..9690b0f6698a1d1b433ea572bdc4efb938f501c5 100644 (file)
  *   nla_put_u16(skb, type, value)     add u16 attribute to skb
  *   nla_put_u32(skb, type, value)     add u32 attribute to skb
  *   nla_put_u64(skb, type, value)     add u64 attribute to skb
+ *   nla_put_s8(skb, type, value)      add s8 attribute to skb
+ *   nla_put_s16(skb, type, value)     add s16 attribute to skb
+ *   nla_put_s32(skb, type, value)     add s32 attribute to skb
+ *   nla_put_s64(skb, type, value)     add s64 attribute to skb
  *   nla_put_string(skb, type, str)    add string attribute to skb
  *   nla_put_flag(skb, type)           add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
  *   nla_get_u16(nla)                  get payload for a u16 attribute
  *   nla_get_u32(nla)                  get payload for a u32 attribute
  *   nla_get_u64(nla)                  get payload for a u64 attribute
+ *   nla_get_s8(nla)                   get payload for a s8 attribute
+ *   nla_get_s16(nla)                  get payload for a s16 attribute
+ *   nla_get_s32(nla)                  get payload for a s32 attribute
+ *   nla_get_s64(nla)                  get payload for a s64 attribute
  *   nla_get_flag(nla)                 return 1 if flag is true
  *   nla_get_msecs(nla)                        get payload for a msecs attribute
  *
@@ -160,6 +168,10 @@ enum {
        NLA_NESTED_COMPAT,
        NLA_NUL_STRING,
        NLA_BINARY,
+       NLA_S8,
+       NLA_S16,
+       NLA_S32,
+       NLA_S64,
        __NLA_TYPE_MAX,
 };
 
@@ -183,6 +195,8 @@ enum {
  *    NLA_NESTED_COMPAT    Minimum length of structure payload
  *    NLA_U8, NLA_U16,
  *    NLA_U32, NLA_U64,
+ *    NLA_S8, NLA_S16,
+ *    NLA_S32, NLA_S64,
  *    NLA_MSECS            Leaving the length field zero will verify the
  *                         given type fits, using it verifies minimum length
  *                         just like "All other"
@@ -203,19 +217,19 @@ struct nla_policy {
 /**
  * struct nl_info - netlink source information
  * @nlh: Netlink message header of original request
- * @pid: Netlink PID of requesting application
+ * @portid: Netlink PORTID of requesting application
  */
 struct nl_info {
        struct nlmsghdr         *nlh;
        struct net              *nl_net;
-       u32                     pid;
+       u32                     portid;
 };
 
 extern int             netlink_rcv_skb(struct sk_buff *skb,
                                        int (*cb)(struct sk_buff *,
                                                  struct nlmsghdr *));
 extern int             nlmsg_notify(struct sock *sk, struct sk_buff *skb,
-                                    u32 pid, unsigned int group, int report,
+                                    u32 portid, unsigned int group, int report,
                                     gfp_t flags);
 
 extern int             nla_validate(const struct nlattr *head,
@@ -430,7 +444,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
 /**
  * nlmsg_put - Add a new netlink message to an skb
  * @skb: socket buffer to store message in
- * @pid: netlink process id
+ * @portid: netlink process id
  * @seq: sequence number of message
  * @type: message type
  * @payload: length of message payload
@@ -439,13 +453,13 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
  * Returns NULL if the tailroom of the skb is insufficient to store
  * the message header and payload.
  */
-static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                                         int type, int payload, int flags)
 {
        if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
                return NULL;
 
-       return __nlmsg_put(skb, pid, seq, type, payload, flags);
+       return __nlmsg_put(skb, portid, seq, type, payload, flags);
 }
 
 /**
@@ -464,7 +478,7 @@ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
                                                int type, int payload,
                                                int flags)
 {
-       return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                         type, payload, flags);
 }
 
@@ -549,18 +563,18 @@ static inline void nlmsg_free(struct sk_buff *skb)
  * nlmsg_multicast - multicast a netlink message
  * @sk: netlink socket to spread messages to
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  */
 static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
-                                 u32 pid, unsigned int group, gfp_t flags)
+                                 u32 portid, unsigned int group, gfp_t flags)
 {
        int err;
 
        NETLINK_CB(skb).dst_group = group;
 
-       err = netlink_broadcast(sk, skb, pid, group, flags);
+       err = netlink_broadcast(sk, skb, portid, group, flags);
        if (err > 0)
                err = 0;
 
@@ -571,13 +585,13 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
  * nlmsg_unicast - unicast a netlink message
  * @sk: netlink socket to spread message to
  * @skb: netlink message as socket buffer
- * @pid: netlink pid of the destination socket
+ * @portid: netlink portid of the destination socket
  */
-static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
+static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
 {
        int err;
 
-       err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
+       err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
        if (err > 0)
                err = 0;
 
@@ -878,6 +892,50 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
        return nla_put(skb, attrtype, sizeof(__le64), &value);
 }
 
+/**
+ * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+{
+       return nla_put(skb, attrtype, sizeof(s8), &value);
+}
+
+/**
+ * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+{
+       return nla_put(skb, attrtype, sizeof(s16), &value);
+}
+
+/**
+ * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+{
+       return nla_put(skb, attrtype, sizeof(s32), &value);
+}
+
+/**
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+{
+       return nla_put(skb, attrtype, sizeof(s64), &value);
+}
+
 /**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
@@ -993,6 +1051,46 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
        return tmp;
 }
 
+/**
+ * nla_get_s32 - return payload of s32 attribute
+ * @nla: s32 netlink attribute
+ */
+static inline s32 nla_get_s32(const struct nlattr *nla)
+{
+       return *(s32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s16 - return payload of s16 attribute
+ * @nla: s16 netlink attribute
+ */
+static inline s16 nla_get_s16(const struct nlattr *nla)
+{
+       return *(s16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s8 - return payload of s8 attribute
+ * @nla: s8 netlink attribute
+ */
+static inline s8 nla_get_s8(const struct nlattr *nla)
+{
+       return *(s8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s64 - return payload of s64 attribute
+ * @nla: s64 netlink attribute
+ */
+static inline s64 nla_get_s64(const struct nlattr *nla)
+{
+       s64 tmp;
+
+       nla_memcpy(&tmp, nla, sizeof(tmp));
+
+       return tmp;
+}
+
 /**
  * nla_get_flag - return payload of flag attribute
  * @nla: flag netlink attribute
index 3aecdc7a84fb145255d6ae2c8457945d55a3c732..a1d83cc8bf859d356b9d4b437c4d03590fa25453 100644 (file)
@@ -83,6 +83,10 @@ struct netns_ct {
        int                     sysctl_auto_assign_helper;
        bool                    auto_assign_helper_warned;
        struct nf_ip_net        nf_ct_proto;
+#ifdef CONFIG_NF_NAT_NEEDED
+       struct hlist_head       *nat_bysource;
+       unsigned int            nat_htable_size;
+#endif
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
        struct ctl_table_header *acct_sysctl_header;
index 69e50c789d9663ec46f9cc743f8720e2ec1e927a..2ae2b8372cfdc1a64210218fe3824d7f7973b60c 100644 (file)
@@ -52,8 +52,6 @@ struct netns_ipv4 {
        struct xt_table         *iptable_security;
 #endif
        struct xt_table         *nat_table;
-       struct hlist_head       *nat_bysource;
-       unsigned int            nat_htable_size;
 #endif
 
        int sysctl_icmp_echo_ignore_all;
index df0a5456a3fd08b78b7213457a94e5bfd282b87e..214cb0a53359e03b476670e49eb73298545e1859 100644 (file)
@@ -42,6 +42,7 @@ struct netns_ipv6 {
 #ifdef CONFIG_SECURITY
        struct xt_table         *ip6table_security;
 #endif
+       struct xt_table         *ip6table_nat;
 #endif
        struct rt6_info         *ip6_null_entry;
        struct rt6_statistics   *rt6_stats;
@@ -70,4 +71,12 @@ struct netns_ipv6 {
 #endif
 #endif
 };
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+struct netns_nf_frag {
+       struct netns_sysctl_ipv6 sysctl;
+       struct netns_frags      frags;
+};
+#endif
+
 #endif
index cb4e894c0f8dbe5fefb6e82e50c4ad6b524e777d..17ec2b95c062d0b0dc13d2c9702c2ab9da831682 100644 (file)
@@ -5,10 +5,10 @@
 #define __NETNS_PACKET_H__
 
 #include <linux/rculist.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 struct netns_packet {
-       spinlock_t              sklist_lock;
+       struct mutex            sklist_lock;
        struct hlist_head       sklist;
 };
 
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644 (file)
index 0000000..5e5eb1f
--- /dev/null
@@ -0,0 +1,131 @@
+#ifndef __NETNS_SCTP_H__
+#define __NETNS_SCTP_H__
+
+struct sock;
+struct proc_dir_entry;
+struct sctp_mib;
+struct ctl_table_header;
+
+struct netns_sctp {
+       DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry *proc_net_sctp;
+#endif
+#ifdef CONFIG_SYSCTL
+       struct ctl_table_header *sysctl_header;
+#endif
+       /* This is the global socket data structure used for responding to
+        * the Out-of-the-blue (OOTB) packets.  A control sock will be created
+        * for this socket at the initialization time.
+        */
+       struct sock *ctl_sock;
+
+       /* This is the global local address list.
+        * We actively maintain this complete list of addresses on
+        * the system by catching address add/delete events.
+        *
+        * It is a list of sctp_sockaddr_entry.
+        */
+       struct list_head local_addr_list;
+       struct list_head addr_waitq;
+       struct timer_list addr_wq_timer;
+       struct list_head auto_asconf_splist;
+       spinlock_t addr_wq_lock;
+
+       /* Lock that protects the local_addr_list writers */
+       spinlock_t local_addr_lock;
+
+       /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+        *
+        * The following protocol parameters are RECOMMENDED:
+        *
+        * RTO.Initial              - 3  seconds
+        * RTO.Min                  - 1  second
+        * RTO.Max                 -  60 seconds
+        * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
+        * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
+        */
+       unsigned int rto_initial;
+       unsigned int rto_min;
+       unsigned int rto_max;
+
+       /* Note: rto_alpha and rto_beta are really defined as inverse
+        * powers of two to facilitate integer operations.
+        */
+       int rto_alpha;
+       int rto_beta;
+
+       /* Max.Burst                - 4 */
+       int max_burst;
+
+       /* Whether Cookie Preservative is enabled(1) or not(0) */
+       int cookie_preserve_enable;
+
+       /* Valid.Cookie.Life        - 60  seconds  */
+       unsigned int valid_cookie_life;
+
+       /* Delayed SACK timeout  200ms default*/
+       unsigned int sack_timeout;
+
+       /* HB.interval              - 30 seconds  */
+       unsigned int hb_interval;
+
+       /* Association.Max.Retrans  - 10 attempts
+        * Path.Max.Retrans         - 5  attempts (per destination address)
+        * Max.Init.Retransmits     - 8  attempts
+        */
+       int max_retrans_association;
+       int max_retrans_path;
+       int max_retrans_init;
+       /* Potentially-Failed.Max.Retrans sysctl value
+        * taken from:
+        * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+        */
+       int pf_retrans;
+
+       /*
+        * Policy for preforming sctp/socket accounting
+        * 0   - do socket level accounting, all assocs share sk_sndbuf
+        * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
+        */
+       int sndbuf_policy;
+
+       /*
+        * Policy for preforming sctp/socket accounting
+        * 0   - do socket level accounting, all assocs share sk_rcvbuf
+        * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
+        */
+       int rcvbuf_policy;
+
+       int default_auto_asconf;
+
+       /* Flag to indicate if addip is enabled. */
+       int addip_enable;
+       int addip_noauth;
+
+       /* Flag to indicate if PR-SCTP is enabled. */
+       int prsctp_enable;
+
+       /* Flag to idicate if SCTP-AUTH is enabled */
+       int auth_enable;
+
+       /*
+        * Policy to control SCTP IPv4 address scoping
+        * 0   - Disable IPv4 address scoping
+        * 1   - Enable IPv4 address scoping
+        * 2   - Selectively allow only IPv4 private addresses
+        * 3   - Selectively allow only IPv4 link local address
+        */
+       int scope_policy;
+
+       /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
+        * bits is an indicator of when to send and window update SACK.
+        */
+       int rwnd_upd_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
+};
+
+#endif /* __NETNS_SCTP_H__ */
index f5169b04f0829aa10a438fcfb33e336b4cc26f33..e900072950cb8cf5635e4a106389e34bd5df21f1 100644 (file)
@@ -30,6 +30,11 @@ struct nfc_hci_ops {
        int (*open) (struct nfc_hci_dev *hdev);
        void (*close) (struct nfc_hci_dev *hdev);
        int (*hci_ready) (struct nfc_hci_dev *hdev);
+       /*
+        * xmit must always send the complete buffer before
+        * returning. Returned result must be 0 for success
+        * or negative for failure.
+        */
        int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
        int (*start_poll) (struct nfc_hci_dev *hdev,
                           u32 im_protocols, u32 tm_protocols);
@@ -38,8 +43,8 @@ struct nfc_hci_ops {
        int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
                                           struct nfc_target *target);
        int (*data_exchange) (struct nfc_hci_dev *hdev,
-                             struct nfc_target *target,
-                             struct sk_buff *skb, struct sk_buff **res_skb);
+                             struct nfc_target *target, struct sk_buff *skb,
+                             data_exchange_cb_t cb, void *cb_context);
        int (*check_presence)(struct nfc_hci_dev *hdev,
                              struct nfc_target *target);
 };
@@ -74,7 +79,6 @@ struct nfc_hci_dev {
 
        struct list_head msg_tx_queue;
 
-       struct workqueue_struct *msg_tx_wq;
        struct work_struct msg_tx_work;
 
        struct timer_list cmd_timer;
@@ -82,13 +86,14 @@ struct nfc_hci_dev {
 
        struct sk_buff_head rx_hcp_frags;
 
-       struct workqueue_struct *msg_rx_wq;
        struct work_struct msg_rx_work;
 
        struct sk_buff_head msg_rx_queue;
 
        struct nfc_hci_ops *ops;
 
+       struct nfc_llc *llc;
+
        struct nfc_hci_init_data init_data;
 
        void *clientdata;
@@ -105,12 +110,17 @@ struct nfc_hci_dev {
        u8 hw_mpw;
        u8 hw_software;
        u8 hw_bsid;
+
+       int async_cb_type;
+       data_exchange_cb_t async_cb;
+       void *async_cb_context;
 };
 
 /* hci device allocation */
 struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                                            struct nfc_hci_init_data *init_data,
                                            u32 protocols,
+                                           const char *llc_name,
                                            int tx_headroom,
                                            int tx_tailroom,
                                            int max_link_payload);
@@ -202,6 +212,9 @@ int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
                      const u8 *param, size_t param_len);
 int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
                     const u8 *param, size_t param_len, struct sk_buff **skb);
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+                          const u8 *param, size_t param_len,
+                          data_exchange_cb_t cb, void *cb_context);
 int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
                          const u8 *param, size_t param_len);
 int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h
new file mode 100644 (file)
index 0000000..400ab7a
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Link Layer Control manager public interface
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __NFC_LLC_H_
+#define __NFC_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <linux/skbuff.h>
+
+#define LLC_NOP_NAME "nop"
+#define LLC_SHDLC_NAME "shdlc"
+
+typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef void (*llc_failure_t) (struct nfc_hci_dev *hdev, int err);
+
+struct nfc_llc;
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+                                xmit_to_drv_t xmit_to_drv,
+                                rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                                int tx_tailroom, llc_failure_t llc_failure);
+void nfc_llc_free(struct nfc_llc *llc);
+
+void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
+                                  int *rx_tailroom);
+
+
+int nfc_llc_start(struct nfc_llc *llc);
+int nfc_llc_stop(struct nfc_llc *llc);
+void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb);
+int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb);
+
+int nfc_llc_init(void);
+void nfc_llc_exit(void);
+
+#endif /* __NFC_LLC_H_ */
index 276094b91d7ced880477730fd487f162cf5b4a6e..88785e5c6b2cf6d2c32a8af553259a9d21678025 100644 (file)
@@ -32,6 +32,7 @@
 #define NCI_MAX_NUM_MAPPING_CONFIGS                            10
 #define NCI_MAX_NUM_RF_CONFIGS                                 10
 #define NCI_MAX_NUM_CONN                                       10
+#define NCI_MAX_PARAM_LEN                                      251
 
 /* NCI Status Codes */
 #define NCI_STATUS_OK                                          0x00
 #define NCI_RF_INTERFACE_ISO_DEP                               0x02
 #define NCI_RF_INTERFACE_NFC_DEP                               0x03
 
+/* NCI Configuration Parameter Tags */
+#define NCI_PN_ATR_REQ_GEN_BYTES                               0x29
+
 /* NCI Reset types */
 #define NCI_RESET_TYPE_KEEP_CONFIG                             0x00
 #define NCI_RESET_TYPE_RESET_CONFIG                            0x01
@@ -188,6 +192,18 @@ struct nci_core_reset_cmd {
 
 #define NCI_OP_CORE_INIT_CMD           nci_opcode_pack(NCI_GID_CORE, 0x01)
 
+#define NCI_OP_CORE_SET_CONFIG_CMD     nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct set_config_param {
+       __u8    id;
+       __u8    len;
+       __u8    val[NCI_MAX_PARAM_LEN];
+} __packed;
+
+struct nci_core_set_config_cmd {
+       __u8    num_params;
+       struct  set_config_param param; /* support 1 param per cmd is enough */
+} __packed;
+
 #define NCI_OP_RF_DISCOVER_MAP_CMD     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 struct disc_map_config {
        __u8    rf_protocol;
@@ -252,6 +268,13 @@ struct nci_core_init_rsp_2 {
        __le32  manufact_specific_info;
 } __packed;
 
+#define NCI_OP_CORE_SET_CONFIG_RSP     nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct nci_core_set_config_rsp {
+       __u8    status;
+       __u8    num_params;
+       __u8    params_id[0];   /* variable size array */
+} __packed;
+
 #define NCI_OP_RF_DISCOVER_MAP_RSP     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 
 #define NCI_OP_RF_DISCOVER_RSP         nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -328,6 +351,11 @@ struct activation_params_nfcb_poll_iso_dep {
        __u8    attrib_res[50];
 };
 
+struct activation_params_poll_nfc_dep {
+       __u8    atr_res_len;
+       __u8    atr_res[63];
+};
+
 struct nci_rf_intf_activated_ntf {
        __u8    rf_discovery_id;
        __u8    rf_interface;
@@ -351,6 +379,7 @@ struct nci_rf_intf_activated_ntf {
        union {
                struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
                struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
+               struct activation_params_poll_nfc_dep poll_nfc_dep;
        } activation_params;
 
 } __packed;
index feba74027ff8bc18674c68625f8f6e5a9cd6562f..d705d867494987b30da4272fe4c1967a7884412a 100644 (file)
@@ -54,6 +54,7 @@ enum nci_state {
 /* NCI timeouts */
 #define NCI_RESET_TIMEOUT                      5000
 #define NCI_INIT_TIMEOUT                       5000
+#define NCI_SET_CONFIG_TIMEOUT                 5000
 #define NCI_RF_DISC_TIMEOUT                    5000
 #define NCI_RF_DISC_SELECT_TIMEOUT             5000
 #define NCI_RF_DEACTIVATE_TIMEOUT              30000
@@ -137,6 +138,10 @@ struct nci_dev {
        data_exchange_cb_t      data_exchange_cb;
        void                    *data_exchange_cb_context;
        struct sk_buff          *rx_data_reassembly;
+
+       /* stored during intf_activated_ntf */
+       __u8 remote_gb[NFC_MAX_GT_LEN];
+       __u8 remote_gb_len;
 };
 
 /* ----- NCI Devices ----- */
index 6431f5e3902217cba706b36f269844242f907856..f05b10682c9d9bbe0aa65579fc73bd8adf1747df 100644 (file)
@@ -72,6 +72,7 @@ struct nfc_ops {
 
 #define NFC_TARGET_IDX_ANY -1
 #define NFC_MAX_GT_LEN 48
+#define NFC_ATR_RES_GT_OFFSET 15
 
 struct nfc_target {
        u32 idx;
@@ -89,7 +90,7 @@ struct nfc_target {
 };
 
 struct nfc_genl_data {
-       u32 poll_req_pid;
+       u32 poll_req_portid;
        struct mutex genl_data_mutex;
 };
 
@@ -112,7 +113,6 @@ struct nfc_dev {
        int tx_tailroom;
 
        struct timer_list check_pres_timer;
-       struct workqueue_struct *check_pres_wq;
        struct work_struct check_pres_work;
 
        struct nfc_ops *ops;
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
deleted file mode 100644 (file)
index 35e930d..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2012  Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __NFC_SHDLC_H
-#define __NFC_SHDLC_H
-
-struct nfc_shdlc;
-
-struct nfc_shdlc_ops {
-       int (*open) (struct nfc_shdlc *shdlc);
-       void (*close) (struct nfc_shdlc *shdlc);
-       int (*hci_ready) (struct nfc_shdlc *shdlc);
-       int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
-       int (*start_poll) (struct nfc_shdlc *shdlc,
-                          u32 im_protocols, u32 tm_protocols);
-       int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
-                                struct nfc_target *target);
-       int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
-                                          struct nfc_target *target);
-       int (*data_exchange) (struct nfc_shdlc *shdlc,
-                             struct nfc_target *target,
-                             struct sk_buff *skb, struct sk_buff **res_skb);
-       int (*check_presence)(struct nfc_shdlc *shdlc,
-                             struct nfc_target *target);
-};
-
-enum shdlc_state {
-       SHDLC_DISCONNECTED = 0,
-       SHDLC_CONNECTING = 1,
-       SHDLC_NEGOCIATING = 2,
-       SHDLC_CONNECTED = 3
-};
-
-struct nfc_shdlc {
-       struct mutex state_mutex;
-       enum shdlc_state state;
-       int hard_fault;
-
-       struct nfc_hci_dev *hdev;
-
-       wait_queue_head_t *connect_wq;
-       int connect_tries;
-       int connect_result;
-       struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
-
-       u8 w;                           /* window size */
-       bool srej_support;
-
-       struct timer_list t1_timer;     /* send ack timeout */
-       bool t1_active;
-
-       struct timer_list t2_timer;     /* guard/retransmit timeout */
-       bool t2_active;
-
-       int ns;                         /* next seq num for send */
-       int nr;                         /* next expected seq num for receive */
-       int dnr;                        /* oldest sent unacked seq num */
-
-       struct sk_buff_head rcv_q;
-
-       struct sk_buff_head send_q;
-       bool rnr;                       /* other side is not ready to receive */
-
-       struct sk_buff_head ack_pending_q;
-
-       struct workqueue_struct *sm_wq;
-       struct work_struct sm_work;
-
-       struct nfc_shdlc_ops *ops;
-
-       int client_headroom;
-       int client_tailroom;
-
-       void *clientdata;
-};
-
-void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb);
-
-struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
-                                    struct nfc_hci_init_data *init_data,
-                                    u32 protocols,
-                                    int tx_headroom, int tx_tailroom,
-                                    int max_link_payload, const char *devname);
-
-void nfc_shdlc_free(struct nfc_shdlc *shdlc);
-
-void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata);
-void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc);
-struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc);
-
-#endif /* __NFC_SHDLC_H */
index 4c0766e201e39b7650773e6ba8a6329d997269b4..b01d8dd9ee7ce99eb1bf702f00007ae456f28c16 100644 (file)
@@ -106,6 +106,34 @@ struct listen_sock {
        struct request_sock     *syn_table[0];
 };
 
+/*
+ * For a TCP Fast Open listener -
+ *     lock - protects the access to all the reqsk, which is co-owned by
+ *             the listener and the child socket.
+ *     qlen - pending TFO requests (still in TCP_SYN_RECV).
+ *     max_qlen - max TFO reqs allowed before TFO is disabled.
+ *
+ *     XXX (TFO) - ideally these fields can be made as part of "listen_sock"
+ *     structure above. But there is some implementation difficulty due to
+ *     listen_sock being part of request_sock_queue hence will be freed when
+ *     a listener is stopped. But TFO related fields may continue to be
+ *     accessed even after a listener is closed, until its sk_refcnt drops
+ *     to 0 implying no more outstanding TFO reqs. One solution is to keep
+ *     listen_opt around until sk_refcnt drops to 0. But there is some other
+ *     complexity that needs to be resolved. E.g., a listener can be disabled
+ *     temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
+ */
+struct fastopen_queue {
+       struct request_sock     *rskq_rst_head; /* Keep track of past TFO */
+       struct request_sock     *rskq_rst_tail; /* requests that caused RST.
+                                                * This is part of the defense
+                                                * against spoofing attack.
+                                                */
+       spinlock_t      lock;
+       int             qlen;           /* # of pending (TCP_SYN_RECV) reqs */
+       int             max_qlen;       /* != 0 iff TFO is currently enabled */
+};
+
 /** struct request_sock_queue - queue of request_socks
  *
  * @rskq_accept_head - FIFO head of established children
@@ -129,6 +157,12 @@ struct request_sock_queue {
        u8                      rskq_defer_accept;
        /* 3 bytes hole, try to pack */
        struct listen_sock      *listen_opt;
+       struct fastopen_queue   *fastopenq; /* This is non-NULL iff TFO has been
+                                            * enabled on this listener. Check
+                                            * max_qlen != 0 in fastopen_queue
+                                            * to determine if TFO is enabled
+                                            * right at this moment.
+                                            */
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
 extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+extern void reqsk_fastopen_remove(struct sock *sk,
+                                 struct request_sock *req, bool reset);
 
 static inline struct request_sock *
        reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
@@ -190,19 +226,6 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
        return req;
 }
 
-static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
-                                                struct sock *parent)
-{
-       struct request_sock *req = reqsk_queue_remove(queue);
-       struct sock *child = req->sk;
-
-       WARN_ON(child == NULL);
-
-       sk_acceptq_removed(parent);
-       __reqsk_free(req);
-       return child;
-}
-
 static inline int reqsk_queue_removed(struct request_sock_queue *queue,
                                      struct request_sock *req)
 {
index 7dc0854f0b3891992696002b7f75edf7a01233ab..975cca01048bee3b7da9017725654053921250ae 100644 (file)
  */
 #define SCM_MAX_FD     253
 
+struct scm_creds {
+       u32     pid;
+       kuid_t  uid;
+       kgid_t  gid;
+};
+
 struct scm_fp_list {
        short                   count;
        short                   max;
@@ -22,7 +28,7 @@ struct scm_cookie {
        struct pid              *pid;           /* Skb credentials */
        const struct cred       *cred;
        struct scm_fp_list      *fp;            /* Passed files         */
-       struct ucred            creds;          /* Skb credentials      */
+       struct scm_creds        creds;          /* Skb credentials      */
 #ifdef CONFIG_SECURITY_NETWORK
        u32                     secid;          /* Passed security ID   */
 #endif
@@ -49,7 +55,9 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
 {
        scm->pid  = get_pid(pid);
        scm->cred = cred ? get_cred(cred) : NULL;
-       cred_to_ucred(pid, cred, &scm->creds);
+       scm->creds.pid = pid_vnr(pid);
+       scm->creds.uid = cred ? cred->euid : INVALID_UID;
+       scm->creds.gid = cred ? cred->egid : INVALID_GID;
 }
 
 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
@@ -65,7 +73,7 @@ static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
 static __inline__ void scm_destroy(struct scm_cookie *scm)
 {
        scm_destroy_cred(scm);
-       if (scm && scm->fp)
+       if (scm->fp)
                __scm_destroy(scm);
 }
 
@@ -112,8 +120,15 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
                return;
        }
 
-       if (test_bit(SOCK_PASSCRED, &sock->flags))
-               put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+       if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+               struct user_namespace *current_ns = current_user_ns();
+               struct ucred ucreds = {
+                       .pid = scm->creds.pid,
+                       .uid = from_kuid_munged(current_ns, scm->creds.uid),
+                       .gid = from_kgid_munged(current_ns, scm->creds.gid),
+               };
+               put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
+       }
 
        scm_destroy_cred(scm);
 
index ff499640528b0012fd76c09141e978688f00b0a9..9c6414f553f91f2256698323ea5d8ec72db4e868 100644 (file)
 /*
  * sctp/protocol.c
  */
-extern struct sock *sctp_get_ctl_sock(void);
-extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
+extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
                                     sctp_scope_t, gfp_t gfp,
                                     int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int);
+extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -140,12 +139,12 @@ extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 /*
  * sctp/primitive.c
  */
-int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
-int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
-int sctp_primitive_ABORT(struct sctp_association *, void *arg);
-int sctp_primitive_SEND(struct sctp_association *, void *arg);
-int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);
-int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
 
 /*
  * sctp/input.c
@@ -156,7 +155,7 @@ void sctp_hash_established(struct sctp_association *);
 void sctp_unhash_established(struct sctp_association *);
 void sctp_hash_endpoint(struct sctp_endpoint *);
 void sctp_unhash_endpoint(struct sctp_endpoint *);
-struct sock *sctp_err_lookup(int family, struct sk_buff *,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
                             struct sctphdr *, struct sctp_association **,
                             struct sctp_transport **);
 void sctp_err_finish(struct sock *, struct sctp_association *);
@@ -173,14 +172,14 @@ void sctp_backlog_migrate(struct sctp_association *assoc,
 /*
  * sctp/proc.c
  */
-int sctp_snmp_proc_init(void);
-void sctp_snmp_proc_exit(void);
-int sctp_eps_proc_init(void);
-void sctp_eps_proc_exit(void);
-int sctp_assocs_proc_init(void);
-void sctp_assocs_proc_exit(void);
-int sctp_remaddr_proc_init(void);
-void sctp_remaddr_proc_exit(void);
+int sctp_snmp_proc_init(struct net *net);
+void sctp_snmp_proc_exit(struct net *net);
+int sctp_eps_proc_init(struct net *net);
+void sctp_eps_proc_exit(struct net *net);
+int sctp_assocs_proc_init(struct net *net);
+void sctp_assocs_proc_exit(struct net *net);
+int sctp_remaddr_proc_init(struct net *net);
+void sctp_remaddr_proc_exit(struct net *net);
 
 
 /*
@@ -222,11 +221,10 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
 #define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
 
 /* SCTP SNMP MIB stats handlers */
-DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
-#define SCTP_INC_STATS(field)      SNMP_INC_STATS(sctp_statistics, field)
-#define SCTP_INC_STATS_BH(field)   SNMP_INC_STATS_BH(sctp_statistics, field)
-#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
-#define SCTP_DEC_STATS(field)      SNMP_DEC_STATS(sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 #endif /* !TEST_FRAME */
 
@@ -361,25 +359,29 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
 #define SCTP_DBG_OBJCNT_ENTRY(name) \
 {.label= #name, .counter= &sctp_dbg_objcnt_## name}
 
-void sctp_dbg_objcnt_init(void);
-void sctp_dbg_objcnt_exit(void);
+void sctp_dbg_objcnt_init(struct net *);
+void sctp_dbg_objcnt_exit(struct net *);
 
 #else
 
 #define SCTP_DBG_OBJCNT_INC(name)
 #define SCTP_DBG_OBJCNT_DEC(name)
 
-static inline void sctp_dbg_objcnt_init(void) { return; }
-static inline void sctp_dbg_objcnt_exit(void) { return; }
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
 
 #endif /* CONFIG_SCTP_DBG_OBJCOUNT */
 
 #if defined CONFIG_SYSCTL
 void sctp_sysctl_register(void);
 void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
 #else
 static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
 #endif
 
 /* Size of Supported Address Parameter for 'x' address types. */
@@ -586,7 +588,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
 
 extern struct proto sctp_prot;
 extern struct proto sctpv6_prot;
-extern struct proc_dir_entry *proc_net_sctp;
 void sctp_put_port(struct sock *sk);
 
 extern struct idr sctp_assocs_id;
@@ -632,21 +633,21 @@ static inline int sctp_sanity_check(void)
 
 /* Warning: The following hash functions assume a power of two 'size'. */
 /* This is the hash function for the SCTP port hash table. */
-static inline int sctp_phashfn(__u16 lport)
+static inline int sctp_phashfn(struct net *net, __u16 lport)
 {
-       return lport & (sctp_port_hashsize - 1);
+       return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
 }
 
 /* This is the hash function for the endpoint hash table. */
-static inline int sctp_ep_hashfn(__u16 lport)
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
 {
-       return lport & (sctp_ep_hashsize - 1);
+       return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
 }
 
 /* This is the hash function for the association hash table. */
-static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
+static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
 {
-       int h = (lport << 16) + rport;
+       int h = (lport << 16) + rport + net_hash_mix(net);
        h ^= h>>8;
        return h & (sctp_assoc_hashsize - 1);
 }
index 9148632b820467ff3e64ec911c63429e56272539..b5887e1677e4e421479919399b945844133595af 100644 (file)
@@ -77,7 +77,8 @@ typedef struct {
        int action;
 } sctp_sm_command_t;
 
-typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
+                                             const struct sctp_endpoint *,
                                              const struct sctp_association *,
                                              const sctp_subtype_t type,
                                              void *arg,
@@ -178,7 +179,8 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
 
 /* Prototypes for utility support functions.  */
 __u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
+                                           sctp_event_t,
                                            sctp_state_t,
                                            sctp_subtype_t);
 int sctp_chunk_iif(const struct sctp_chunk *);
@@ -268,7 +270,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *);
 
 /* Prototypes for statetable processing. */
 
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
               sctp_state_t state,
                struct sctp_endpoint *,
                struct sctp_association *asoc,
index fc5e60016e37422e9408d9ca0c0b00136aaf2bb6..0fef00f5d3ce1fe65e2e6483ba1502f69615597b 100644 (file)
@@ -102,6 +102,7 @@ struct sctp_bind_bucket {
        unsigned short  fastreuse;
        struct hlist_node       node;
        struct hlist_head       owner;
+       struct net      *net;
 };
 
 struct sctp_bind_hashbucket {
@@ -118,69 +119,6 @@ struct sctp_hashbucket {
 
 /* The SCTP globals structure. */
 extern struct sctp_globals {
-       /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
-        *
-        * The following protocol parameters are RECOMMENDED:
-        *
-        * RTO.Initial              - 3  seconds
-        * RTO.Min                  - 1  second
-        * RTO.Max                 -  60 seconds
-        * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
-        * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
-        */
-       unsigned int rto_initial;
-       unsigned int rto_min;
-       unsigned int rto_max;
-
-       /* Note: rto_alpha and rto_beta are really defined as inverse
-        * powers of two to facilitate integer operations.
-        */
-       int rto_alpha;
-       int rto_beta;
-
-       /* Max.Burst                - 4 */
-       int max_burst;
-
-       /* Whether Cookie Preservative is enabled(1) or not(0) */
-       int cookie_preserve_enable;
-
-       /* Valid.Cookie.Life        - 60  seconds  */
-       unsigned int valid_cookie_life;
-
-       /* Delayed SACK timeout  200ms default*/
-       unsigned int sack_timeout;
-
-       /* HB.interval              - 30 seconds  */
-       unsigned int hb_interval;
-
-       /* Association.Max.Retrans  - 10 attempts
-        * Path.Max.Retrans         - 5  attempts (per destination address)
-        * Max.Init.Retransmits     - 8  attempts
-        */
-       int max_retrans_association;
-       int max_retrans_path;
-       int max_retrans_init;
-
-       /* Potentially-Failed.Max.Retrans sysctl value
-        * taken from:
-        * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
-        */
-       int pf_retrans;
-
-       /*
-        * Policy for preforming sctp/socket accounting
-        * 0   - do socket level accounting, all assocs share sk_sndbuf
-        * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
-        */
-       int sndbuf_policy;
-
-       /*
-        * Policy for preforming sctp/socket accounting
-        * 0   - do socket level accounting, all assocs share sk_rcvbuf
-        * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
-        */
-       int rcvbuf_policy;
-
        /* The following variables are implementation specific.  */
 
        /* Default initialization values to be applied to new associations. */
@@ -204,70 +142,11 @@ extern struct sctp_globals {
        int port_hashsize;
        struct sctp_bind_hashbucket *port_hashtable;
 
-       /* This is the global local address list.
-        * We actively maintain this complete list of addresses on
-        * the system by catching address add/delete events.
-        *
-        * It is a list of sctp_sockaddr_entry.
-        */
-       struct list_head local_addr_list;
-       int default_auto_asconf;
-       struct list_head addr_waitq;
-       struct timer_list addr_wq_timer;
-       struct list_head auto_asconf_splist;
-       spinlock_t addr_wq_lock;
-
-       /* Lock that protects the local_addr_list writers */
-       spinlock_t addr_list_lock;
-       
-       /* Flag to indicate if addip is enabled. */
-       int addip_enable;
-       int addip_noauth_enable;
-
-       /* Flag to indicate if PR-SCTP is enabled. */
-       int prsctp_enable;
-
-       /* Flag to idicate if SCTP-AUTH is enabled */
-       int auth_enable;
-
-       /*
-        * Policy to control SCTP IPv4 address scoping
-        * 0   - Disable IPv4 address scoping
-        * 1   - Enable IPv4 address scoping
-        * 2   - Selectively allow only IPv4 private addresses
-        * 3   - Selectively allow only IPv4 link local address
-        */
-       int ipv4_scope_policy;
-
        /* Flag to indicate whether computing and verifying checksum
         * is disabled. */
         bool checksum_disable;
-
-       /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
-        * bits is an indicator of when to send and window update SACK.
-        */
-       int rwnd_update_shift;
-
-       /* Threshold for autoclose timeout, in seconds. */
-       unsigned long max_autoclose;
 } sctp_globals;
 
-#define sctp_rto_initial               (sctp_globals.rto_initial)
-#define sctp_rto_min                   (sctp_globals.rto_min)
-#define sctp_rto_max                   (sctp_globals.rto_max)
-#define sctp_rto_alpha                 (sctp_globals.rto_alpha)
-#define sctp_rto_beta                  (sctp_globals.rto_beta)
-#define sctp_max_burst                 (sctp_globals.max_burst)
-#define sctp_valid_cookie_life         (sctp_globals.valid_cookie_life)
-#define sctp_cookie_preserve_enable    (sctp_globals.cookie_preserve_enable)
-#define sctp_max_retrans_association   (sctp_globals.max_retrans_association)
-#define sctp_sndbuf_policy             (sctp_globals.sndbuf_policy)
-#define sctp_rcvbuf_policy             (sctp_globals.rcvbuf_policy)
-#define sctp_max_retrans_path          (sctp_globals.max_retrans_path)
-#define sctp_pf_retrans                        (sctp_globals.pf_retrans)
-#define sctp_max_retrans_init          (sctp_globals.max_retrans_init)
-#define sctp_sack_timeout              (sctp_globals.sack_timeout)
-#define sctp_hb_interval               (sctp_globals.hb_interval)
 #define sctp_max_instreams             (sctp_globals.max_instreams)
 #define sctp_max_outstreams            (sctp_globals.max_outstreams)
 #define sctp_address_families          (sctp_globals.address_families)
@@ -277,21 +156,7 @@ extern struct sctp_globals {
 #define sctp_assoc_hashtable           (sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize             (sctp_globals.port_hashsize)
 #define sctp_port_hashtable            (sctp_globals.port_hashtable)
-#define sctp_local_addr_list           (sctp_globals.local_addr_list)
-#define sctp_local_addr_lock           (sctp_globals.addr_list_lock)
-#define sctp_auto_asconf_splist                (sctp_globals.auto_asconf_splist)
-#define sctp_addr_waitq                        (sctp_globals.addr_waitq)
-#define sctp_addr_wq_timer             (sctp_globals.addr_wq_timer)
-#define sctp_addr_wq_lock              (sctp_globals.addr_wq_lock)
-#define sctp_default_auto_asconf       (sctp_globals.default_auto_asconf)
-#define sctp_scope_policy              (sctp_globals.ipv4_scope_policy)
-#define sctp_addip_enable              (sctp_globals.addip_enable)
-#define sctp_addip_noauth              (sctp_globals.addip_noauth_enable)
-#define sctp_prsctp_enable             (sctp_globals.prsctp_enable)
-#define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
-#define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
-#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -1085,7 +950,7 @@ struct sctp_transport {
        __u64 hb_nonce;
 };
 
-struct sctp_transport *sctp_transport_new(const union sctp_addr *,
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
                                          gfp_t);
 void sctp_transport_set_owner(struct sctp_transport *,
                              struct sctp_association *);
@@ -1240,7 +1105,7 @@ struct sctp_bind_addr {
 
 void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
 void sctp_bind_addr_free(struct sctp_bind_addr *);
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                        const struct sctp_bind_addr *src,
                        sctp_scope_t scope, gfp_t gfp,
                        int flags);
@@ -1267,7 +1132,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
                           __u16 port, gfp_t gfp);
 
 sctp_scope_t sctp_scope(const union sctp_addr *);
-int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
 int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
 int sctp_addr_is_valid(const union sctp_addr *addr);
 int sctp_is_ep_boundall(struct sock *sk);
@@ -1425,13 +1290,13 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
                                const union sctp_addr *);
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
-                                       const union sctp_addr *);
-int sctp_has_association(const union sctp_addr *laddr,
+                                       struct net *, const union sctp_addr *);
+int sctp_has_association(struct net *net, const union sctp_addr *laddr,
                         const union sctp_addr *paddr);
 
-int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
-                    sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
-                    struct sctp_chunk **err_chunk);
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
+                    sctp_cid_t, sctp_init_chunk_t *peer_init,
+                    struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
 int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
                      const union sctp_addr *peer,
                      sctp_init_chunk_t *init, gfp_t gfp);
@@ -2013,6 +1878,7 @@ void sctp_assoc_control_transport(struct sctp_association *,
                                  sctp_transport_cmd_t, sctp_sn_error_t);
 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
+                                          struct net *,
                                           const union sctp_addr *,
                                           const union sctp_addr *);
 void sctp_assoc_migrate(struct sctp_association *, struct sock *);
index 0147b901e79c4d6b43eef9dbe45f76eef0fc1720..71596261fa997ec7014b77f0bbee9b47b6146493 100644 (file)
@@ -154,13 +154,15 @@ struct linux_xfrm_mib {
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
        do { \
-               this_cpu_inc(mib[0]->mibs[basefield##PKTS]);            \
-               this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);  \
+               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               this_cpu_inc(ptr[basefield##PKTS]);             \
+               this_cpu_add(ptr[basefield##OCTETS], addend);   \
        } while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
        do { \
-               __this_cpu_inc(mib[0]->mibs[basefield##PKTS]);          \
-               __this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);        \
+               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __this_cpu_inc(ptr[basefield##PKTS]);           \
+               __this_cpu_add(ptr[basefield##OCTETS], addend); \
        } while (0)
 
 
index 0d7e9834d9be78dad67707301866d9568deb7942..c945fba4f54351475ff2efb989f77b23237f60d4 100644 (file)
@@ -247,8 +247,7 @@ struct cg_proto;
   *    @sk_stamp: time stamp of last packet received
   *    @sk_socket: Identd and reporting IO signals
   *    @sk_user_data: RPC layer private data
-  *    @sk_sndmsg_page: cached page for sendmsg
-  *    @sk_sndmsg_off: cached offset for sendmsg
+  *    @sk_frag: cached page frag
   *    @sk_peek_off: current peek_offset value
   *    @sk_send_head: front of stuff to transmit
   *    @sk_security: used by security modules
@@ -362,9 +361,8 @@ struct sock {
        ktime_t                 sk_stamp;
        struct socket           *sk_socket;
        void                    *sk_user_data;
-       struct page             *sk_sndmsg_page;
+       struct page_frag        sk_frag;
        struct sk_buff          *sk_send_head;
-       __u32                   sk_sndmsg_off;
        __s32                   sk_peek_off;
        int                     sk_write_pending;
 #ifdef CONFIG_SECURITY
@@ -2026,18 +2024,23 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
 
-static inline struct page *sk_stream_alloc_page(struct sock *sk)
+/**
+ * sk_page_frag - return an appropriate page_frag
+ * @sk: socket
+ *
+ * If socket allocation mode allows current thread to sleep, it means it's
+ * safe to use the per-task page_frag instead of the per-socket one.
+ */
+static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       struct page *page = NULL;
+       if (sk->sk_allocation & __GFP_WAIT)
+               return &current->task_frag;
 
-       page = alloc_pages(sk->sk_allocation, 0);
-       if (!page) {
-               sk_enter_memory_pressure(sk);
-               sk_stream_moderate_sndbuf(sk);
-       }
-       return page;
+       return &sk->sk_frag;
 }
 
+extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+
 /*
  *     Default write policy as shown to user space via poll/select/SIGIO
  */
@@ -2218,8 +2221,6 @@ extern int net_msg_warn;
 extern __u32 sysctl_wmem_max;
 extern __u32 sysctl_rmem_max;
 
-extern void sk_init(void);
-
 extern int sysctl_optmem_max;
 
 extern __u32 sysctl_wmem_default;
index 9a0021d16d919a46240d5f502207d95c1a22853c..6feeccd83dd7557abd30e32c0547f483b79bd238 100644 (file)
@@ -98,11 +98,21 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
                                 * 15 is ~13-30min depending on RTO.
                                 */
 
-#define TCP_SYN_RETRIES         5      /* number of times to retry active opening a
-                                * connection: ~180sec is RFC minimum   */
+#define TCP_SYN_RETRIES         6      /* This is how many retries are done
+                                * when active opening a connection.
+                                * RFC1122 says the minimum retry MUST
+                                * be at least 180secs.  Nevertheless
+                                * this value corresponds to
+                                * 63secs of retransmission with the
+                                * current initial RTO.
+                                */
 
-#define TCP_SYNACK_RETRIES 5   /* number of times to retry passive opening a
-                                * connection: ~180sec is RFC minimum   */
+#define TCP_SYNACK_RETRIES 5   /* This is how many retries are done
+                                * when passive opening a connection.
+                                * This corresponds to 31secs of
+                                * retransmission with the current
+                                * initial RTO.
+                                */
 
 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
                                  * state, about 60 seconds     */
@@ -214,8 +224,24 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 /* Bit Flags for sysctl_tcp_fastopen */
 #define        TFO_CLIENT_ENABLE       1
+#define        TFO_SERVER_ENABLE       2
 #define        TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */
 
+/* Process SYN data but skip cookie validation */
+#define        TFO_SERVER_COOKIE_NOT_CHKED     0x100
+/* Accept SYN data w/o any cookie option */
+#define        TFO_SERVER_COOKIE_NOT_REQD      0x200
+
+/* Force enable TFO on all listeners, i.e., not requiring the
+ * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ */
+#define        TFO_SERVER_WO_SOCKOPT1  0x400
+#define        TFO_SERVER_WO_SOCKOPT2  0x800
+/* Always create TFO child sockets on a TFO listener even when
+ * cookie/data not present. (For testing purpose!)
+ */
+#define        TFO_SERVER_ALWAYS       0x1000
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -398,7 +424,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *
                                                     const struct tcphdr *th);
 extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
                                   struct request_sock *req,
-                                  struct request_sock **prev);
+                                  struct request_sock **prev,
+                                  bool fastopen);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
                             struct sk_buff *skb);
 extern bool tcp_use_frto(struct sock *sk);
@@ -411,12 +438,6 @@ extern void tcp_metrics_init(void);
 extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
 extern bool tcp_remember_stamp(struct sock *sk);
 extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  bool syn_lost);
 extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 extern void tcp_disable_fack(struct tcp_sock *tp);
 extern void tcp_close(struct sock *sk, long timeout);
@@ -458,7 +479,8 @@ extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
 extern int tcp_connect(struct sock *sk);
 extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                        struct request_sock *req,
-                                       struct request_values *rvp);
+                                       struct request_values *rvp,
+                                       struct tcp_fastopen_cookie *foc);
 extern int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_connect_init(struct sock *sk);
@@ -527,6 +549,7 @@ extern void tcp_send_delayed_ack(struct sock *sk);
 extern void tcp_cwnd_application_limited(struct sock *sk);
 extern void tcp_resume_early_retransmit(struct sock *sk);
 extern void tcp_rearm_rto(struct sock *sk);
+extern void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
@@ -576,6 +599,7 @@ extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
 extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
+extern void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
@@ -889,15 +913,21 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
 }
 
+static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
+{
+       return (TCPF_CA_CWR | TCPF_CA_Recovery) &
+              (1 << inet_csk(sk)->icsk_ca_state);
+}
+
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
- * The exception is rate halving phase, when cwnd is decreasing towards
+ * The exception is cwnd reduction phase, when cwnd is decreasing towards
  * ssthresh.
  */
 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
 
-       if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
+       if (tcp_in_cwnd_reduction(sk))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
@@ -1094,6 +1124,8 @@ static inline void tcp_openreq_init(struct request_sock *req,
        req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+       tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+       tcp_rsk(req)->snt_synack = 0;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1106,6 +1138,15 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->loc_port = tcp_hdr(skb)->dest;
 }
 
+/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
+static inline void tcp_synack_rtt_meas(struct sock *sk,
+                                      struct request_sock *req)
+{
+       if (tcp_rsk(req)->snt_synack)
+               tcp_valid_rtt_meas(sk,
+                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
+}
+
 extern void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1298,15 +1339,34 @@ extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                            const struct tcp_md5sig_key *key);
 
+/* From tcp_fastopen.c */
+extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+                                  struct tcp_fastopen_cookie *cookie,
+                                  int *syn_loss, unsigned long *last_syn_loss);
+extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+                                  struct tcp_fastopen_cookie *cookie,
+                                  bool syn_lost);
 struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie      cookie;
        struct msghdr                   *data;  /* data in MSG_FASTOPEN */
        u16                             copied; /* queued in tcp_connect() */
 };
-
 void tcp_free_fastopen_req(struct tcp_sock *tp);
 
+extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
+
+#define TCP_FASTOPEN_KEY_LENGTH 16
+
+/* Fastopen key context */
+struct tcp_fastopen_context {
+       struct crypto_cipher __rcu      *tfm;
+       __u8                            key[TCP_FASTOPEN_KEY_LENGTH];
+       struct rcu_head                 rcu;
+};
+
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
index 411d83c9821d1854b1468b24359354563835a8f6..6f0ba01afe7315d443a0aad152a9df2d5a22abf6 100644 (file)
@@ -263,7 +263,7 @@ struct km_event {
        } data;
 
        u32     seq;
-       u32     pid;
+       u32     portid;
        u32     event;
        struct net *net;
 };
@@ -313,7 +313,7 @@ extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
+extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 extern int __xfrm_state_delete(struct xfrm_state *x);
 
 struct xfrm_state_afinfo {
@@ -576,7 +576,7 @@ struct xfrm_mgr {
        struct list_head        list;
        char                    *id;
        int                     (*notify)(struct xfrm_state *x, const struct km_event *c);
-       int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
+       int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
        struct xfrm_policy      *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
        int                     (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
        int                     (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
@@ -1558,7 +1558,7 @@ extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 #endif
 
 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
+extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
 extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
 
 extern void xfrm_input_init(void);
index 5cb20ccb195606b9cc0fdac6869b2789c06e1649..62b4edab15d32ca59871921433f378f1aa842602 100644 (file)
@@ -119,29 +119,5 @@ struct scsi_nl_host_vendor_msg {
        (hdr)->msglen = mlen;                                   \
        }
 
-
-#ifdef __KERNEL__
-
-#include <scsi/scsi_host.h>
-
-/* Exported Kernel Interfaces */
-int scsi_nl_add_transport(u8 tport,
-        int (*msg_handler)(struct sk_buff *),
-       void (*event_handler)(struct notifier_block *, unsigned long, void *));
-void scsi_nl_remove_transport(u8 tport);
-
-int scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
-       int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
-                                u32 len, u32 pid),
-       void (*nlevt_handler)(struct notifier_block *nb,
-                                unsigned long event, void *notify_ptr));
-void scsi_nl_remove_driver(u64 vendor_id);
-
-void scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr);
-int scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
-                        char *data_buf, u32 data_len);
-
-#endif /* __KERNEL__ */
-
 #endif /* SCSI_NETLINK_H */
 
index 511488a7bc71d0f1f6265ed9c3fe62838923a18b..4d0ceede33194e4d29334899ce3c85dba141fb65 100644 (file)
@@ -88,11 +88,11 @@ static int  audit_failure = AUDIT_FAIL_PRINTK;
 
 /*
  * If audit records are to be written to the netlink socket, audit_pid
- * contains the pid of the auditd process and audit_nlk_pid contains
- * the pid to use to send netlink messages to that process.
+ * contains the pid of the auditd process and audit_nlk_portid contains
+ * the portid to use to send netlink messages to that process.
  */
 int            audit_pid;
-static int     audit_nlk_pid;
+static int     audit_nlk_portid;
 
 /* If audit_rate_limit is non-zero, limit the rate of sending audit records
  * to that number per second.  This prevents DoS attacks, but results in
@@ -402,7 +402,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
        int err;
        /* take a reference in case we can't send it and we want to hold it */
        skb_get(skb);
-       err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
+       err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
        if (err < 0) {
                BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
                printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
@@ -679,7 +679,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                status_set.backlog_limit = audit_backlog_limit;
                status_set.lost          = atomic_read(&audit_lost);
                status_set.backlog       = skb_queue_len(&audit_skb_queue);
-               audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
+               audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
                                 &status_set, sizeof(status_set));
                break;
        case AUDIT_SET:
@@ -707,7 +707,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                                        sessionid, sid, 1);
 
                        audit_pid = new_pid;
-                       audit_nlk_pid = NETLINK_CB(skb).pid;
+                       audit_nlk_portid = NETLINK_CB(skb).portid;
                }
                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
                        err = audit_set_rate_limit(status_get->rate_limit,
@@ -750,7 +750,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                        size--;
                                audit_log_n_untrustedstring(ab, data, size);
                        }
-                       audit_set_pid(ab, NETLINK_CB(skb).pid);
+                       audit_set_pid(ab, NETLINK_CB(skb).portid);
                        audit_log_end(ab);
                }
                break;
@@ -769,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                /* fallthrough */
        case AUDIT_LIST:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
+               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
                                           seq, data, nlmsg_len(nlh),
                                           loginuid, sessionid, sid);
                break;
@@ -788,7 +788,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                /* fallthrough */
        case AUDIT_LIST_RULES:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
+               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
                                           seq, data, nlmsg_len(nlh),
                                           loginuid, sessionid, sid);
                break;
@@ -859,7 +859,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        memcpy(sig_data->ctx, ctx, len);
                        security_release_secctx(ctx, len);
                }
-               audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
+               audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
                                0, 0, sig_data, sizeof(*sig_data) + len);
                kfree(sig_data);
                break;
@@ -871,7 +871,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                s.enabled = tsk->signal->audit_tty != 0;
                spin_unlock_irq(&tsk->sighand->siglock);
 
-               audit_send_reply(NETLINK_CB(skb).pid, seq,
+               audit_send_reply(NETLINK_CB(skb).portid, seq,
                                 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                break;
        }
@@ -946,8 +946,7 @@ static int __init audit_init(void)
 
        printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
               audit_default ? "enabled" : "disabled");
-       audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT,
-                                          THIS_MODULE, &cfg);
+       audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, &cfg);
        if (!audit_sock)
                audit_panic("cannot initialize netlink socket");
        else
index f65345f9e5bbe2aa06b69db5b91b1c4002857c0f..42f25952edd93a5a12304203eb91999f1638f33d 100644 (file)
@@ -1046,6 +1046,9 @@ void do_exit(long code)
        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);
 
+       if (tsk->task_frag.page)
+               put_page(tsk->task_frag.page);
+
        validate_creds_for_do_exit(tsk);
 
        preempt_disable();
index 5a0e74d89a5aa2e459e42679d6105fda07b7e8bd..a2b1efc2092809904024e33e8bf0405d0d73de12 100644 (file)
@@ -330,6 +330,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->btrace_seq = 0;
 #endif
        tsk->splice_pipe = NULL;
+       tsk->task_frag.page = NULL;
 
        account_kernel_stack(ti, 1);
 
index 3880df2acf053047efec942d6fefd2f8998a92c7..5eab1f3edfa574e0026c128c1b4cb6ca4dca4c6c 100644 (file)
@@ -476,7 +476,7 @@ static int cmd_attr_register_cpumask(struct genl_info *info)
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
-       rc = add_del_listener(info->snd_pid, mask, REGISTER);
+       rc = add_del_listener(info->snd_portid, mask, REGISTER);
 out:
        free_cpumask_var(mask);
        return rc;
@@ -492,7 +492,7 @@ static int cmd_attr_deregister_cpumask(struct genl_info *info)
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
-       rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
+       rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
 out:
        free_cpumask_var(mask);
        return rc;
index 0401d2916d9fa25515540c0483dd486adaddc858..52e5abbc41dbc0724e9b6664e7674b6bd680ca53 100644 (file)
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
        struct uevent_sock *ue_sk;
        struct netlink_kernel_cfg cfg = {
                .groups = 1,
+               .flags  = NL_CFG_F_NONROOT_RECV,
        };
 
        ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
        if (!ue_sk)
                return -ENOMEM;
 
-       ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
-                                         THIS_MODULE, &cfg);
+       ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
        if (!ue_sk->sk) {
                printk(KERN_ERR
                       "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
 
 static int __init kobject_uevent_init(void)
 {
-       netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
        return register_pernet_subsys(&uevent_net_ops);
 }
 
index 4226dfeb51786f4e0a926499dca36f9b4f64d9d6..18eca7809b08894cd135519f8991e138bbc0fa2a 100644 (file)
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_U64]       = sizeof(u64),
        [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
+       [NLA_S8]        = sizeof(s8),
+       [NLA_S16]       = sizeof(s16),
+       [NLA_S32]       = sizeof(s32),
+       [NLA_S64]       = sizeof(s64),
 };
 
 static int validate_nla(const struct nlattr *nla, int maxtype,
index 8ca533c95de0346f181c1acd067a787c20a49f62..b258da88f6756d4f3ad6d52c992f86fb5a23a93d 100644 (file)
@@ -368,3 +368,9 @@ void vlan_vids_del_by_dev(struct net_device *dev,
                vlan_vid_del(dev, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+bool vlan_uses_dev(const struct net_device *dev)
+{
+       return rtnl_dereference(dev->vlan_info) ? true : false;
+}
+EXPORT_SYMBOL(vlan_uses_dev);
index 245831bec09a56dd263cde753050d10208ee0200..30b48f523135be8f00025f32b887b51f0f7000c3 100644 (file)
@@ -52,6 +52,8 @@ source "net/iucv/Kconfig"
 
 config INET
        bool "TCP/IP networking"
+       select CRYPTO
+       select CRYPTO_AES
        ---help---
          These are the protocols used on the Internet and on most local
          Ethernets. It is highly recommended to say Y here (this will enlarge
index 23f45ce6f3510fb1e9b30b58b7e52fac6a456961..0447d5d0b63983b139bda2853eaa9980640dd32d 100644 (file)
@@ -432,7 +432,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
                        size = dev->ops->ioctl(dev, cmd, buf);
                }
                if (size < 0) {
-                       error = (size == -ENOIOCTLCMD ? -EINVAL : size);
+                       error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
                        goto done;
                }
        }
index 469daabd90c7bf28572c3f9066ee2146ed590fd1..b02b75dae3a8a5dfeb015838ba5eca2ecb5d6109 100644 (file)
@@ -166,13 +166,15 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
        int16_t buff_pos;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct sk_buff *skb;
+       uint8_t *packet_pos;
 
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                return;
 
        packet_num = 0;
        buff_pos = 0;
-       batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+       packet_pos = forw_packet->skb->data;
+       batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
-               if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-                   (forw_packet->if_incoming == hard_iface))
+               if (forw_packet->direct_link_flags & BIT(packet_num) &&
+                   forw_packet->if_incoming == hard_iface)
                        batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
                else
                        batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
-               fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
-                                                           "Sending own" :
-                                                           "Forwarding"));
+               if (packet_num > 0 || !forw_packet->own)
+                       fwd_str = "Forwarding";
+               else
+                       fwd_str = "Sending own";
+
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                buff_pos += BATADV_OGM_HLEN;
                buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
                packet_num++;
-               batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                       (forw_packet->skb->data + buff_pos);
+               packet_pos = forw_packet->skb->data + buff_pos;
+               batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        }
 
        /* create clone because function is called more than once */
@@ -227,9 +231,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_ogm_packet *batadv_ogm_packet;
        unsigned char directlink;
+       uint8_t *packet_pos;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                               (forw_packet->skb->data);
+       packet_pos = forw_packet->skb->data;
+       batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
 
        if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
                                    int packet_len, bool direct_link)
 {
        unsigned char *skb_buff;
+       unsigned long new_direct_link_flag;
 
        skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
        memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
        forw_packet_aggr->num_packets++;
 
        /* save packet direct link flag status */
-       if (direct_link)
-               forw_packet_aggr->direct_link_flags |=
-                       (1 << forw_packet_aggr->num_packets);
+       if (direct_link) {
+               new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
+               forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
+       }
 }
 
 static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *primary_if;
        int vis_server, tt_num_changes = 0;
+       uint32_t seqno;
+       uint8_t bandwidth;
 
        vis_server = atomic_read(&bat_priv->vis_mode);
        primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
 
        /* change sequence number to network order */
-       batadv_ogm_packet->seqno =
-                       htonl((uint32_t)atomic_read(&hard_iface->seqno));
+       seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+       batadv_ogm_packet->seqno = htonl(seqno);
        atomic_inc(&hard_iface->seqno);
 
-       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
-       batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
+       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
+       batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
        if (tt_num_changes >= 0)
                batadv_ogm_packet->tt_num_changes = tt_num_changes;
 
@@ -613,12 +622,13 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        else
                batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
 
-       if ((hard_iface == primary_if) &&
-           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
-               batadv_ogm_packet->gw_flags =
-                               (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-       else
+       if (hard_iface == primary_if &&
+           atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
+               bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+               batadv_ogm_packet->gw_flags = bandwidth;
+       } else {
                batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
+       }
 
        batadv_slide_own_bcast_window(hard_iface);
        batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -645,6 +655,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        int if_num;
        uint8_t sum_orig, sum_neigh;
        uint8_t *neigh_addr;
+       uint8_t tq_avg;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "update_originator(): Searching and updating originator entry of received packet\n");
@@ -668,8 +679,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                spin_lock_bh(&tmp_neigh_node->lq_update_lock);
                batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
                                       &tmp_neigh_node->tq_index, 0);
-               tmp_neigh_node->tq_avg =
-                       batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+               tmp_neigh_node->tq_avg = tq_avg;
                spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
        }
 
@@ -836,8 +847,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
-       total_count = (orig_eq_count > neigh_rq_count ?
-                      neigh_rq_count : orig_eq_count);
+       if (orig_eq_count > neigh_rq_count)
+               total_count = neigh_rq_count;
+       else
+               total_count = orig_eq_count;
 
        /* if we have too few packets (too less data) we set tq_own to zero
         * if we receive too few packets it is not considered bidirectional
@@ -911,6 +924,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        int set_mark, ret = -1;
        uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
        uint8_t *neigh_addr;
+       uint8_t packet_count;
 
        orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
@@ -945,9 +959,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                                                     tmp_neigh_node->real_bits,
                                                     seq_diff, set_mark);
 
-               tmp_neigh_node->real_packet_count =
-                       bitmap_weight(tmp_neigh_node->real_bits,
-                                     BATADV_TQ_LOCAL_WINDOW_SIZE);
+               packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+                                            BATADV_TQ_LOCAL_WINDOW_SIZE);
+               tmp_neigh_node->real_packet_count = packet_count;
        }
        rcu_read_unlock();
 
@@ -1164,9 +1178,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        /* if sender is a direct neighbor the sender mac equals
         * originator mac
         */
-       orig_neigh_node = (is_single_hop_neigh ?
-                          orig_node :
-                          batadv_get_orig_node(bat_priv, ethhdr->h_source));
+       if (is_single_hop_neigh)
+               orig_neigh_node = orig_node;
+       else
+               orig_neigh_node = batadv_get_orig_node(bat_priv,
+                                                      ethhdr->h_source);
+
        if (!orig_neigh_node)
                goto out;
 
@@ -1252,6 +1269,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        int buff_pos = 0, packet_len;
        unsigned char *tt_buff, *packet_buff;
        bool ret;
+       uint8_t *packet_pos;
 
        ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
        if (!ret)
@@ -1282,8 +1300,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
                buff_pos += BATADV_OGM_HLEN;
                buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
 
-               batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                               (packet_buff + buff_pos);
+               packet_pos = packet_buff + buff_pos;
+               batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
                                           batadv_ogm_packet->tt_num_changes));
 
index 6705d35b17cef3351ed9c202f5ce2bb17df933a7..0a9084ad19a60f71a2f9f70a8879e7ada24ccf6c 100644 (file)
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
 static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
                                                   struct batadv_claim *data)
 {
-       struct batadv_hashtable *hash = bat_priv->claim_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_claim *claim;
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
                          uint8_t *addr, short vid)
 {
-       struct batadv_hashtable *hash = bat_priv->backbone_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
        int i;
        spinlock_t *list_lock;  /* protects write access to the hash lists */
 
-       hash = backbone_gw->bat_priv->claim_hash;
+       hash = backbone_gw->bat_priv->bla.claim_hash;
        if (!hash)
                return;
 
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
        if (!primary_if)
                return;
 
-       memcpy(&local_claim_dest, &bat_priv->claim_dest,
+       memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
               sizeof(local_claim_dest));
        local_claim_dest.type = claimtype;
 
@@ -281,7 +281,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                         NULL,
                         /* Ethernet SRC/HW SRC:  originator mac */
                         primary_if->net_dev->dev_addr,
-                        /* HW DST: FF:43:05:XX:00:00
+                        /* HW DST: FF:43:05:XX:YY:YY
                          * with XX   = claim type
                          * and YY:YY = group id
                          */
@@ -295,7 +295,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 
        /* now we pretend that the client would have sent this ... */
        switch (claimtype) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                /* normal claim frame
                 * set Ethernet SRC to the clients mac
                 */
@@ -303,7 +303,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
                break;
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                /* unclaim frame
                 * set HW SRC to the clients mac
                 */
@@ -323,7 +323,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                /* request frame
-                * set HW SRC to the special mac containg the crc
+                * set HW SRC and header destination to the receiving backbone
+                * gw's MAC
                 */
                memcpy(hw_src, mac, ETH_ALEN);
                memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
-       bat_priv->stats.rx_packets++;
-       bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+       batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                          skb->len + ETH_HLEN);
        soft_iface->last_rx = jiffies;
 
        netif_rx(skb);
@@ -389,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
        /* one for the hash, one for returning */
        atomic_set(&entry->refcount, 2);
 
-       hash_added = batadv_hash_add(bat_priv->backbone_hash,
+       hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
                                     batadv_compare_backbone_gw,
                                     batadv_choose_backbone_gw, entry,
                                     &entry->hash_entry);
@@ -456,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
        if (!backbone_gw)
                return;
 
-       hash = bat_priv->claim_hash;
+       hash = bat_priv->bla.claim_hash;
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
@@ -467,7 +469,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                                continue;
 
                        batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
-                                             BATADV_CLAIM_TYPE_ADD);
+                                             BATADV_CLAIM_TYPE_CLAIM);
                }
                rcu_read_unlock();
        }
@@ -497,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
 
        /* no local broadcasts should be sent or received, for now. */
        if (!atomic_read(&backbone_gw->request_sent)) {
-               atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+               atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
                atomic_set(&backbone_gw->request_sent, 1);
        }
 }
@@ -557,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
                           mac, vid);
-               hash_added = batadv_hash_add(bat_priv->claim_hash,
+               hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
                                             batadv_compare_claim,
                                             batadv_choose_claim, claim,
                                             &claim->hash_entry);
@@ -577,8 +579,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
                           mac, vid);
 
-               claim->backbone_gw->crc ^=
-                       crc16(0, claim->addr, ETH_ALEN);
+               claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
                batadv_backbone_gw_free_ref(claim->backbone_gw);
 
        }
@@ -610,7 +611,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
                   mac, vid);
 
-       batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
                           batadv_choose_claim, claim);
        batadv_claim_free_ref(claim); /* reference from the hash is gone */
 
@@ -657,7 +658,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
                 * we can allow traffic again.
                 */
                if (atomic_read(&backbone_gw->request_sent)) {
-                       atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+                       atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
                        atomic_set(&backbone_gw->request_sent, 0);
                }
        }
@@ -702,7 +703,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        if (primary_if && batadv_compare_eth(backbone_addr,
                                             primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
-                                     BATADV_CLAIM_TYPE_DEL);
+                                     BATADV_CLAIM_TYPE_UNCLAIM);
 
        backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 
@@ -738,7 +739,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
        batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
        if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
-                                     BATADV_CLAIM_TYPE_ADD);
+                                     BATADV_CLAIM_TYPE_CLAIM);
 
        /* TODO: we could call something like tt_local_del() here. */
 
@@ -772,7 +773,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
-       bla_dst_own = &bat_priv->claim_dest;
+       bla_dst_own = &bat_priv->bla.claim_dest;
 
        /* check if it is a claim packet in general */
        if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
         * otherwise assume it is in the hw_src
         */
        switch (bla_dst->type) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                backbone_addr = hw_src;
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
        case BATADV_CLAIM_TYPE_ANNOUNCE:
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                backbone_addr = ethhdr->h_source;
                break;
        default:
@@ -904,12 +905,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 
        /* check for the different types of claim frames ... */
        switch (bla_dst->type) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                if (batadv_handle_claim(bat_priv, primary_if, hw_src,
                                        ethhdr->h_source, vid))
                        return 1;
                break;
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                if (batadv_handle_unclaim(bat_priv, primary_if,
                                          ethhdr->h_source, hw_src, vid))
                        return 1;
@@ -945,7 +946,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
        spinlock_t *list_lock;  /* protects write access to the hash lists */
        int i;
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;
 
@@ -969,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 purge_now:
                        /* don't wait for the pending request anymore */
                        if (atomic_read(&backbone_gw->request_sent))
-                               atomic_dec(&bat_priv->bla_num_requests);
+                               atomic_dec(&bat_priv->bla.num_requests);
 
                        batadv_bla_del_backbone_claims(backbone_gw);
 
@@ -999,7 +1000,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
        struct batadv_hashtable *hash;
        int i;
 
-       hash = bat_priv->claim_hash;
+       hash = bat_priv->bla.claim_hash;
        if (!hash)
                return;
 
@@ -1046,11 +1047,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
        struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
+       __be16 group;
        int i;
 
        /* reset bridge loop avoidance group id */
-       bat_priv->claim_dest.group =
-               htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+       group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+       bat_priv->bla.claim_dest.group = group;
 
        if (!oldif) {
                batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                return;
        }
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;
 
@@ -1088,8 +1090,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 /* (re)start the timer */
 static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+       INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
                           msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 }
 
@@ -1099,9 +1101,9 @@ static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
  */
 static void batadv_bla_periodic_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
+       struct batadv_priv_bla *priv_bla;
        struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        struct batadv_hard_iface *primary_if;
        int i;
 
-       bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+       delayed_work = container_of(work, struct delayed_work, work);
+       priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
+       bat_priv = container_of(priv_bla, struct batadv_priv, bla);
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
@@ -1120,7 +1124,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto out;
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                goto out;
 
@@ -1160,40 +1164,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
        int i;
        uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
        struct batadv_hard_iface *primary_if;
+       uint16_t crc;
+       unsigned long entrytime;
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
 
        /* setting claim destination address */
-       memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
-       bat_priv->claim_dest.type = 0;
+       memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
+       bat_priv->bla.claim_dest.type = 0;
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (primary_if) {
-               bat_priv->claim_dest.group =
-                       htons(crc16(0, primary_if->net_dev->dev_addr,
-                                   ETH_ALEN));
+               crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
+               bat_priv->bla.claim_dest.group = htons(crc);
                batadv_hardif_free_ref(primary_if);
        } else {
-               bat_priv->claim_dest.group = 0; /* will be set later */
+               bat_priv->bla.claim_dest.group = 0; /* will be set later */
        }
 
        /* initialize the duplicate list */
+       entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
-               bat_priv->bcast_duplist[i].entrytime =
-                       jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
-       bat_priv->bcast_duplist_curr = 0;
+               bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
+       bat_priv->bla.bcast_duplist_curr = 0;
 
-       if (bat_priv->claim_hash)
+       if (bat_priv->bla.claim_hash)
                return 0;
 
-       bat_priv->claim_hash = batadv_hash_new(128);
-       bat_priv->backbone_hash = batadv_hash_new(32);
+       bat_priv->bla.claim_hash = batadv_hash_new(128);
+       bat_priv->bla.backbone_hash = batadv_hash_new(32);
 
-       if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+       if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
                return -ENOMEM;
 
-       batadv_hash_set_lock_class(bat_priv->claim_hash,
+       batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
                                   &batadv_claim_hash_lock_class_key);
-       batadv_hash_set_lock_class(bat_priv->backbone_hash,
+       batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
                                   &batadv_backbone_hash_lock_class_key);
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1234,8 +1239,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
        crc = crc16(0, content, length);
 
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
-               curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
-               entry = &bat_priv->bcast_duplist[curr];
+               curr = (bat_priv->bla.bcast_duplist_curr + i);
+               curr %= BATADV_DUPLIST_SIZE;
+               entry = &bat_priv->bla.bcast_duplist[curr];
 
                /* we can stop searching if the entry is too old ;
                 * later entries will be even older
@@ -1256,13 +1262,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                return 1;
        }
        /* not found, add a new entry (overwrite the oldest entry) */
-       curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+       curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
        curr %= BATADV_DUPLIST_SIZE;
-       entry = &bat_priv->bcast_duplist[curr];
+       entry = &bat_priv->bla.bcast_duplist[curr];
        entry->crc = crc;
        entry->entrytime = jiffies;
        memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
-       bat_priv->bcast_duplist_curr = curr;
+       bat_priv->bla.bcast_duplist_curr = curr;
 
        /* allow it, its the first occurence. */
        return 0;
@@ -1279,7 +1285,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
  */
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
-       struct batadv_hashtable *hash = bat_priv->backbone_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
                        return 0;
 
-               vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
-                                             hdr_size);
+               vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        }
 
@@ -1359,18 +1364,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *primary_if;
 
-       cancel_delayed_work_sync(&bat_priv->bla_work);
+       cancel_delayed_work_sync(&bat_priv->bla.work);
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
-       if (bat_priv->claim_hash) {
+       if (bat_priv->bla.claim_hash) {
                batadv_bla_purge_claims(bat_priv, primary_if, 1);
-               batadv_hash_destroy(bat_priv->claim_hash);
-               bat_priv->claim_hash = NULL;
+               batadv_hash_destroy(bat_priv->bla.claim_hash);
+               bat_priv->bla.claim_hash = NULL;
        }
-       if (bat_priv->backbone_hash) {
+       if (bat_priv->bla.backbone_hash) {
                batadv_bla_purge_backbone_gw(bat_priv, 1);
-               batadv_hash_destroy(bat_priv->backbone_hash);
-               bat_priv->backbone_hash = NULL;
+               batadv_hash_destroy(bat_priv->bla.backbone_hash);
+               bat_priv->bla.backbone_hash = NULL;
        }
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
                goto allow;
 
 
-       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+       if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
@@ -1508,7 +1513,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
-       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+       if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;
@@ -1564,7 +1569,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->claim_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct batadv_claim *claim;
        struct batadv_hard_iface *primary_if;
        struct hlist_node *node;
@@ -1593,7 +1598,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
                   net_dev->name, primary_addr,
-                  ntohs(bat_priv->claim_dest.group));
+                  ntohs(bat_priv->bla.claim_dest.group));
        seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
                   "Client", "VID", "Originator", "CRC");
        for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@ out:
                batadv_hardif_free_ref(primary_if);
        return ret;
 }
+
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+       struct net_device *net_dev = (struct net_device *)seq->private;
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+       struct batadv_backbone_gw *backbone_gw;
+       struct batadv_hard_iface *primary_if;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       int secs, msecs;
+       uint32_t i;
+       bool is_own;
+       int ret = 0;
+       uint8_t *primary_addr;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       if (primary_if->if_status != BATADV_IF_ACTIVE) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - primary interface not active\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       primary_addr = primary_if->net_dev->dev_addr;
+       seq_printf(seq,
+                  "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+                  net_dev->name, primary_addr,
+                  ntohs(bat_priv->bla.claim_dest.group));
+       seq_printf(seq, "   %-17s    %-5s %-9s (%-4s)\n",
+                  "Originator", "VID", "last seen", "CRC");
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+                       msecs = jiffies_to_msecs(jiffies -
+                                                backbone_gw->lasttime);
+                       secs = msecs / 1000;
+                       msecs = msecs % 1000;
+
+                       is_own = batadv_compare_eth(backbone_gw->orig,
+                                                   primary_addr);
+                       if (is_own)
+                               continue;
+
+                       seq_printf(seq,
+                                  " * %pM on % 5d % 4i.%03is (%04x)\n",
+                                  backbone_gw->orig, backbone_gw->vid,
+                                  secs, msecs, backbone_gw->crc);
+               }
+               rcu_read_unlock();
+       }
+out:
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+       return ret;
+}
index 563cfbf94a7f5110c9dcd5eeffb6ff10da32b86b..789cb73bde67acf8f19375d67e8c0ec2322f509f 100644 (file)
@@ -27,6 +27,8 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+                                            void *offset);
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, short vid,
-                               bool is_bcast)
+                               struct sk_buff *skb, short vid, bool is_bcast)
 {
        return 0;
 }
@@ -66,6 +67,12 @@ static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
        return 0;
 }
 
+static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+                                                          void *offset)
+{
+       return 0;
+}
+
 static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
                                                 uint8_t *orig)
 {
index 34fbb1667bcd5194ae391abf8ca3a03944246c8d..391d4fb2026f9acf7ce1b1f85a0c2f5366152c35 100644 (file)
@@ -267,6 +267,15 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
        return single_open(file, batadv_bla_claim_table_seq_print_text,
                           net_dev);
 }
+
+static int batadv_bla_backbone_table_open(struct inode *inode,
+                                         struct file *file)
+{
+       struct net_device *net_dev = (struct net_device *)inode->i_private;
+       return single_open(file, batadv_bla_backbone_table_seq_print_text,
+                          net_dev);
+}
+
 #endif
 
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@ static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
                        batadv_transtable_global_open);
 #ifdef CONFIG_BATMAN_ADV_BLA
 static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
+                       batadv_bla_backbone_table_open);
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
                        batadv_transtable_local_open);
@@ -316,6 +327,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
        &batadv_debuginfo_transtable_global,
 #ifdef CONFIG_BATMAN_ADV_BLA
        &batadv_debuginfo_bla_claim_table,
+       &batadv_debuginfo_bla_backbone_table,
 #endif
        &batadv_debuginfo_transtable_local,
        &batadv_debuginfo_vis_data,
index fc866f2e4528c71c82ba125c23fb48fbd430b390..15d67abc10a43b329ef3cf561bd3fdf456c3c286 100644 (file)
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
        struct batadv_gw_node *gw_node;
 
        rcu_read_lock();
-       gw_node = rcu_dereference(bat_priv->curr_gw);
+       gw_node = rcu_dereference(bat_priv->gw.curr_gw);
        if (!gw_node)
                goto out;
 
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *curr_gw_node;
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
 
        if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
                new_gw_node = NULL;
 
-       curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
-       rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+       curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+       rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
 
        if (curr_gw_node)
                batadv_gw_node_free_ref(curr_gw_node);
 
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 }
 
 void batadv_gw_deselect(struct batadv_priv *bat_priv)
 {
-       atomic_set(&bat_priv->gw_reselect, 1);
+       atomic_set(&bat_priv->gw.reselect, 1);
 }
 
 static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        struct hlist_node *node;
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+       uint32_t gw_divisor;
        uint8_t max_tq = 0;
        int down, up;
+       uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
+       gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
+       gw_divisor *= 64;
+
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                if (!atomic_inc_not_zero(&gw_node->refcount))
                        goto next;
 
+               tq_avg = router->tq_avg;
+
                switch (atomic_read(&bat_priv->gw_sel_class)) {
                case 1: /* fast connection */
                        batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
                                                    &down, &up);
 
-                       tmp_gw_factor = (router->tq_avg * router->tq_avg *
-                                        down * 100 * 100) /
-                                        (BATADV_TQ_LOCAL_WINDOW_SIZE *
-                                         BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
+                       tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+                       tmp_gw_factor /= gw_divisor;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
                            ((tmp_gw_factor == max_gw_factor) &&
-                            (router->tq_avg > max_tq))) {
+                            (tq_avg > max_tq))) {
                                if (curr_gw)
                                        batadv_gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                          *     soon as a better gateway appears which has
                          *     $routing_class more tq points)
                          */
-                       if (router->tq_avg > max_tq) {
+                       if (tq_avg > max_tq) {
                                if (curr_gw)
                                        batadv_gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                        break;
                }
 
-               if (router->tq_avg > max_tq)
-                       max_tq = router->tq_avg;
+               if (tq_avg > max_tq)
+                       max_tq = tq_avg;
 
                if (tmp_gw_factor > max_gw_factor)
                        max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+       if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
                goto out;
 
        next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        gw_node->orig_node = orig_node;
        atomic_set(&gw_node->refcount, 1);
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
-       hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
+       hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->orig_node != orig_node)
                        continue;
 
@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
 
        hlist_for_each_entry_safe(gw_node, node, node_tmp,
-                                 &bat_priv->gw_list, list) {
+                                 &bat_priv->gw.list, list) {
                if (((!gw_node->deleted) ||
                     (time_before(jiffies, gw_node->deleted + timeout))) &&
                    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
                batadv_gw_node_free_ref(gw_node);
        }
 
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        /* gw_deselect() needs to acquire the gw_list_lock */
        if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                   primary_if->net_dev->dev_addr, net_dev->name);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
index 282bf6e9353e6ad5ab657e0c2722ec9635701f7c..d112fd6750b0564e1b08232482eb8c1261b749ad 100644 (file)
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
 {
        struct batadv_vis_packet *vis_packet;
        struct batadv_hard_iface *primary_if;
+       struct sk_buff *skb;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
 
-       vis_packet = (struct batadv_vis_packet *)
-                               bat_priv->my_vis_info->skb_packet->data;
+       skb = bat_priv->vis.my_info->skb_packet;
+       vis_packet = (struct batadv_vis_packet *)skb->data;
        memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(vis_packet->sender_orig,
               primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        hard_iface->if_num = bat_priv->num_ifaces;
        bat_priv->num_ifaces++;
        hard_iface->if_status = BATADV_IF_INACTIVE;
-       batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+       ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+       if (ret < 0) {
+               bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+               bat_priv->num_ifaces--;
+               hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+               goto err_dev;
+       }
 
        hard_iface->batman_adv_ptype.type = ethertype;
        hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
index 13c88b25ab319a15260e77d12c65957180c46b9c..b4aa470bc4a6ec675d84fb373a973dd4d224cd5d 100644 (file)
@@ -58,9 +58,6 @@ static int __init batadv_init(void)
 
        batadv_iv_init();
 
-       /* the name should not be longer than 10 chars - see
-        * http://lwn.net/Articles/23634/
-        */
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
        if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
 
        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
-       spin_lock_init(&bat_priv->tt_changes_list_lock);
-       spin_lock_init(&bat_priv->tt_req_list_lock);
-       spin_lock_init(&bat_priv->tt_roam_list_lock);
-       spin_lock_init(&bat_priv->tt_buff_lock);
-       spin_lock_init(&bat_priv->gw_list_lock);
-       spin_lock_init(&bat_priv->vis_hash_lock);
-       spin_lock_init(&bat_priv->vis_list_lock);
+       spin_lock_init(&bat_priv->tt.changes_list_lock);
+       spin_lock_init(&bat_priv->tt.req_list_lock);
+       spin_lock_init(&bat_priv->tt.roam_list_lock);
+       spin_lock_init(&bat_priv->tt.last_changeset_lock);
+       spin_lock_init(&bat_priv->gw.list_lock);
+       spin_lock_init(&bat_priv->vis.hash_lock);
+       spin_lock_init(&bat_priv->vis.list_lock);
 
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
-       INIT_HLIST_HEAD(&bat_priv->gw_list);
-       INIT_LIST_HEAD(&bat_priv->tt_changes_list);
-       INIT_LIST_HEAD(&bat_priv->tt_req_list);
-       INIT_LIST_HEAD(&bat_priv->tt_roam_list);
+       INIT_HLIST_HEAD(&bat_priv->gw.list);
+       INIT_LIST_HEAD(&bat_priv->tt.changes_list);
+       INIT_LIST_HEAD(&bat_priv->tt.req_list);
+       INIT_LIST_HEAD(&bat_priv->tt.roam_list);
 
        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
@@ -131,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       atomic_set(&bat_priv->gw_reselect, 0);
+       atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
        return 0;
index 5d8fa07579477e4c82ec8aa160af256fa53ac94f..d57b746219de057c931d8459f1ccef84ce711cee 100644 (file)
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.3.0"
+#define BATADV_SOURCE_VERSION "2012.4.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
  * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
  */
 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
 #define BATADV_TQ_LOCAL_WINDOW_SIZE 64
-/* miliseconds we have to keep pending tt_req */
+/* milliseconds we have to keep pending tt_req */
 #define BATADV_TT_REQUEST_TIMEOUT 3000
 
 #define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
 #define BATADV_TT_OGM_APPEND_MAX 3
 
 /* Time in which a client can roam at most ROAMING_MAX_COUNT times in
- * miliseconds
+ * milliseconds
  */
 #define BATADV_ROAMING_MAX_TIME 20000
 #define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@ enum batadv_uev_type {
 /* Append 'batman-adv: ' before kernel messages */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-/* all messages related to routing / flooding / broadcasting / etc */
-enum batadv_dbg_level {
-       BATADV_DBG_BATMAN = 1 << 0,
-       BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
-       BATADV_DBG_TT     = 1 << 2, /* translation table operations */
-       BATADV_DBG_BLA    = 1 << 3, /* bridge loop avoidance */
-       BATADV_DBG_ALL    = 15,
-};
-
 /* Kernel headers */
 
 #include <linux/mutex.h>       /* mutex */
@@ -173,6 +165,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
 
+/* all messages related to routing / flooding / broadcasting / etc */
+enum batadv_dbg_level {
+       BATADV_DBG_BATMAN = BIT(0),
+       BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
+       BATADV_DBG_TT     = BIT(2), /* translation table operations */
+       BATADV_DBG_BLA    = BIT(3), /* bridge loop avoidance */
+       BATADV_DBG_ALL    = 15,
+};
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
index 8d3e55a96adc8cc1a71d9994e90af4ddd81c50fc..2d23a14c220eb281a839c58650fb9a70802bb415 100644 (file)
@@ -37,10 +37,10 @@ enum batadv_packettype {
 #define BATADV_COMPAT_VERSION 14
 
 enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = 1 << 3,
-       BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
-       BATADV_VIS_SERVER          = 1 << 5,
-       BATADV_DIRECTLINK          = 1 << 6,
+       BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
+       BATADV_PRIMARIES_FIRST_HOP = BIT(4),
+       BATADV_VIS_SERVER          = BIT(5),
+       BATADV_DIRECTLINK          = BIT(6),
 };
 
 /* ICMP message types */
@@ -60,8 +60,8 @@ enum batadv_vis_packettype {
 
 /* fragmentation defines */
 enum batadv_unicast_frag_flags {
-       BATADV_UNI_FRAG_HEAD      = 1 << 0,
-       BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
+       BATADV_UNI_FRAG_HEAD      = BIT(0),
+       BATADV_UNI_FRAG_LARGETAIL = BIT(1),
 };
 
 /* TT_QUERY subtypes */
@@ -74,26 +74,27 @@ enum batadv_tt_query_packettype {
 
 /* TT_QUERY flags */
 enum batadv_tt_query_flags {
-       BATADV_TT_FULL_TABLE = 1 << 2,
+       BATADV_TT_FULL_TABLE = BIT(2),
 };
 
 /* BATADV_TT_CLIENT flags.
- * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only
+ * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
+ * BIT(15) are used for local computation only
  */
 enum batadv_tt_client_flags {
-       BATADV_TT_CLIENT_DEL     = 1 << 0,
-       BATADV_TT_CLIENT_ROAM    = 1 << 1,
-       BATADV_TT_CLIENT_WIFI    = 1 << 2,
-       BATADV_TT_CLIENT_NOPURGE = 1 << 8,
-       BATADV_TT_CLIENT_NEW     = 1 << 9,
-       BATADV_TT_CLIENT_PENDING = 1 << 10,
+       BATADV_TT_CLIENT_DEL     = BIT(0),
+       BATADV_TT_CLIENT_ROAM    = BIT(1),
+       BATADV_TT_CLIENT_WIFI    = BIT(2),
+       BATADV_TT_CLIENT_TEMP    = BIT(3),
+       BATADV_TT_CLIENT_NOPURGE = BIT(8),
+       BATADV_TT_CLIENT_NEW     = BIT(9),
+       BATADV_TT_CLIENT_PENDING = BIT(10),
 };
 
 /* claim frame types for the bridge loop avoidance */
 enum batadv_bla_claimframe {
-       BATADV_CLAIM_TYPE_ADD           = 0x00,
-       BATADV_CLAIM_TYPE_DEL           = 0x01,
+       BATADV_CLAIM_TYPE_CLAIM         = 0x00,
+       BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
        BATADV_CLAIM_TYPE_ANNOUNCE      = 0x02,
        BATADV_CLAIM_TYPE_REQUEST       = 0x03,
 };
index bc2b88bbea1fb5561a9ac2c199eb6926f3af2347..939fc01371dff0c209665b297b69ec6502887b1a 100644 (file)
@@ -579,32 +579,45 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
        return router;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
 {
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_tt_query_packet *tt_query;
-       uint16_t tt_size;
        struct ethhdr *ethhdr;
-       char tt_flag;
-       size_t packet_size;
 
        /* drop packet if it has not necessary minimum size */
-       if (unlikely(!pskb_may_pull(skb,
-                                   sizeof(struct batadv_tt_query_packet))))
-               goto out;
-
-       /* I could need to modify it */
-       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-               goto out;
+       if (unlikely(!pskb_may_pull(skb, hdr_size)))
+               return -1;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
-               goto out;
+               return -1;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
+               return -1;
+
+       /* not for me */
+       if (!batadv_is_my_mac(ethhdr->h_dest))
+               return -1;
+
+       return 0;
+}
+
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_tt_query_packet *tt_query;
+       uint16_t tt_size;
+       int hdr_size = sizeof(*tt_query);
+       char tt_flag;
+       size_t packet_size;
+
+       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+               return NET_RX_DROP;
+
+       /* I could need to modify it */
+       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
                goto out;
 
        tt_query = (struct batadv_tt_query_packet *)skb->data;
@@ -721,7 +734,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
         * been incremented yet. This flag will make me check all the incoming
         * packets for the correct destination.
         */
-       bat_priv->tt_poss_change = true;
+       bat_priv->tt.poss_change = true;
 
        batadv_orig_node_free_ref(orig_node);
 out:
@@ -819,31 +832,6 @@ err:
        return NULL;
 }
 
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
-       struct ethhdr *ethhdr;
-
-       /* drop packet if it has not necessary minimum size */
-       if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return -1;
-
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-       /* packet with unicast indication but broadcast recipient */
-       if (is_broadcast_ether_addr(ethhdr->h_dest))
-               return -1;
-
-       /* packet with broadcast sender address */
-       if (is_broadcast_ether_addr(ethhdr->h_source))
-               return -1;
-
-       /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
-               return -1;
-
-       return 0;
-}
-
 static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if)
 {
@@ -947,8 +935,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        if (batadv_is_my_mac(unicast_packet->dest)) {
-               tt_poss_change = bat_priv->tt_poss_change;
-               curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+               tt_poss_change = bat_priv->tt.poss_change;
+               curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        } else {
                orig_node = batadv_orig_hash_find(bat_priv,
                                                  unicast_packet->dest);
@@ -993,8 +981,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                } else {
                        memcpy(unicast_packet->dest, orig_node->orig,
                               ETH_ALEN);
-                       curr_ttvn = (uint8_t)
-                               atomic_read(&orig_node->last_ttvn);
+                       curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
                        batadv_orig_node_free_ref(orig_node);
                }
 
@@ -1025,8 +1012,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 
        /* packet for me */
        if (batadv_is_my_mac(unicast_packet->dest)) {
-               batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
-                                   hdr_size);
+               batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+                                   NULL);
+
                return NET_RX_SUCCESS;
        }
 
@@ -1063,7 +1051,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
                        return NET_RX_SUCCESS;
 
                batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-                                   sizeof(struct batadv_unicast_packet));
+                                   sizeof(struct batadv_unicast_packet), NULL);
                return NET_RX_SUCCESS;
        }
 
@@ -1150,7 +1138,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
                goto out;
 
        /* broadcast for me */
-       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+                           orig_node);
        ret = NET_RX_SUCCESS;
        goto out;
 
index 3b4b2daa3b3e1b40315a2afa657b13d67517ebb0..570a8bce0364ea08ea45341b57b362a72beedc45 100644 (file)
@@ -190,13 +190,13 @@ out:
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
        struct batadv_hard_iface *hard_iface;
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;
 
+       delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@ out:
 
 void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;
 
+       delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
index 21c53577c8d6a5e65599aa5c01d8d9d93c6fb838..b9a28d2dd3e8d907526ebd51dfedba9e993fd526 100644 (file)
@@ -93,7 +93,14 @@ static int batadv_interface_release(struct net_device *dev)
 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
-       return &bat_priv->stats;
+       struct net_device_stats *stats = &bat_priv->stats;
+
+       stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
+       stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
+       stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+       stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
+       stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
+       return stats;
 }
 
 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
@@ -145,6 +152,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        int data_len = skb->len, ret;
        short vid __maybe_unused = -1;
        bool do_bcast = false;
+       uint32_t seqno;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
@@ -226,8 +234,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
                       primary_if->net_dev->dev_addr, ETH_ALEN);
 
                /* set broadcast sequence number */
-               bcast_packet->seqno =
-                       htonl(atomic_inc_return(&bat_priv->bcast_seqno));
+               seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+               bcast_packet->seqno = htonl(seqno);
 
                batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 
@@ -249,14 +257,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
                        goto dropped_freed;
        }
 
-       bat_priv->stats.tx_packets++;
-       bat_priv->stats.tx_bytes += data_len;
+       batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+       batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
        goto end;
 
 dropped:
        kfree_skb(skb);
 dropped_freed:
-       bat_priv->stats.tx_dropped++;
+       batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
 end:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -265,7 +273,7 @@ end:
 
 void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-                        int hdr_size)
+                        int hdr_size, struct batadv_orig_node *orig_node)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct ethhdr *ethhdr;
@@ -311,11 +319,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
 
-       bat_priv->stats.rx_packets++;
-       bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+       batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                          skb->len + ETH_HLEN);
 
        soft_iface->last_rx = jiffies;
 
+       if (orig_node)
+               batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
+                                                    ethhdr->h_source);
+
        if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
                goto dropped;
 
@@ -382,15 +395,22 @@ struct net_device *batadv_softif_create(const char *name)
        if (!soft_iface)
                goto out;
 
+       bat_priv = netdev_priv(soft_iface);
+
+       /* batadv_interface_stats() needs to be available as soon as
+        * register_netdevice() has been called
+        */
+       bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+       if (!bat_priv->bat_counters)
+               goto free_soft_iface;
+
        ret = register_netdevice(soft_iface);
        if (ret < 0) {
                pr_err("Unable to register the batman interface '%s': %i\n",
                       name, ret);
-               goto free_soft_iface;
+               goto free_bat_counters;
        }
 
-       bat_priv = netdev_priv(soft_iface);
-
        atomic_set(&bat_priv->aggregated_ogms, 1);
        atomic_set(&bat_priv->bonding, 0);
        atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -408,29 +428,26 @@ struct net_device *batadv_softif_create(const char *name)
 
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
        atomic_set(&bat_priv->bcast_seqno, 1);
-       atomic_set(&bat_priv->ttvn, 0);
-       atomic_set(&bat_priv->tt_local_changes, 0);
-       atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
-       atomic_set(&bat_priv->bla_num_requests, 0);
-
-       bat_priv->tt_buff = NULL;
-       bat_priv->tt_buff_len = 0;
-       bat_priv->tt_poss_change = false;
+       atomic_set(&bat_priv->tt.vn, 0);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+       atomic_set(&bat_priv->bla.num_requests, 0);
+#endif
+       bat_priv->tt.last_changeset = NULL;
+       bat_priv->tt.last_changeset_len = 0;
+       bat_priv->tt.poss_change = false;
 
        bat_priv->primary_if = NULL;
        bat_priv->num_ifaces = 0;
 
-       bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
-       if (!bat_priv->bat_counters)
-               goto unreg_soft_iface;
-
        ret = batadv_algo_select(bat_priv, batadv_routing_algo);
        if (ret < 0)
-               goto free_bat_counters;
+               goto unreg_soft_iface;
 
        ret = batadv_sysfs_add_meshif(soft_iface);
        if (ret < 0)
-               goto free_bat_counters;
+               goto unreg_soft_iface;
 
        ret = batadv_debugfs_add_meshif(soft_iface);
        if (ret < 0)
@@ -446,12 +463,13 @@ unreg_debugfs:
        batadv_debugfs_del_meshif(soft_iface);
 unreg_sysfs:
        batadv_sysfs_del_meshif(soft_iface);
-free_bat_counters:
-       free_percpu(bat_priv->bat_counters);
 unreg_soft_iface:
+       free_percpu(bat_priv->bat_counters);
        unregister_netdevice(soft_iface);
        return NULL;
 
+free_bat_counters:
+       free_percpu(bat_priv->bat_counters);
 free_soft_iface:
        free_netdev(soft_iface);
 out:
@@ -521,6 +539,11 @@ static u32 batadv_get_link(struct net_device *dev)
 static const struct {
        const char name[ETH_GSTRING_LEN];
 } batadv_counters_strings[] = {
+       { "tx" },
+       { "tx_bytes" },
+       { "tx_dropped" },
+       { "rx" },
+       { "rx_bytes" },
        { "forward" },
        { "forward_bytes" },
        { "mgmt_tx" },
index 852c683b06a187a1eecaa7c68ad2430f747c34f3..07a08fed28b97ae2739e8a7cb36040f75345a951 100644 (file)
@@ -21,8 +21,9 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
-void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
-                        struct batadv_hard_iface *recv_if, int hdr_size);
+void batadv_interface_rx(struct net_device *soft_iface,
+                        struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+                        int hdr_size, struct batadv_orig_node *orig_node);
 struct net_device *batadv_softif_create(const char *name);
 void batadv_softif_destroy(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
index 99dd8f75b3ff20f0d2a277e1f65ffc284b002f2e..112edd371b2f81c79f431c6e99402165405c1b10 100644 (file)
@@ -34,6 +34,10 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
 static void batadv_tt_purge(struct work_struct *work);
 static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+                                struct batadv_orig_node *orig_node,
+                                const unsigned char *addr,
+                                const char *message, bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 
 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
+       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
                           msecs_to_jiffies(5000));
 }
 
@@ -88,7 +92,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
        if (tt_common_entry)
                tt_local_entry = container_of(tt_common_entry,
                                              struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
        if (tt_common_entry)
                tt_global_entry = container_of(tt_common_entry,
                                               struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
+       if (!atomic_dec_and_test(&orig_entry->refcount))
+               return;
        /* to avoid race conditions, immediately decrease the tt counter */
        atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
@@ -175,8 +181,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
        /* check for ADD+DEL or DEL+ADD events */
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (!batadv_compare_eth(entry->change.addr, addr))
                        continue;
@@ -203,15 +209,15 @@ del:
        }
 
        /* track the change in the OGMinterval list */
-       list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+       list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
        if (event_removed)
-               atomic_dec(&bat_priv->tt_local_changes);
+               atomic_dec(&bat_priv->tt.local_changes);
        else
-               atomic_inc(&bat_priv->tt_local_changes);
+               atomic_inc(&bat_priv->tt.local_changes);
 }
 
 int batadv_tt_len(int changes_num)
@@ -221,12 +227,12 @@ int batadv_tt_len(int changes_num)
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
 {
-       if (bat_priv->tt_local_hash)
+       if (bat_priv->tt.local_hash)
                return 0;
 
-       bat_priv->tt_local_hash = batadv_hash_new(1024);
+       bat_priv->tt.local_hash = batadv_hash_new(1024);
 
-       if (!bat_priv->tt_local_hash)
+       if (!bat_priv->tt.local_hash)
                return -ENOMEM;
 
        return 0;
@@ -258,7 +264,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
-                  (uint8_t)atomic_read(&bat_priv->ttvn));
+                  (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
        tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -266,6 +272,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
        atomic_set(&tt_local_entry->common.refcount, 2);
        tt_local_entry->last_seen = jiffies;
+       tt_local_entry->common.added_at = tt_local_entry->last_seen;
 
        /* the batman interface mac address should never be purged */
        if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -277,7 +284,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
         */
        tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
 
-       hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+       hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
                                     batadv_choose_orig,
                                     &tt_local_entry->common,
                                     &tt_local_entry->common.hash_entry);
@@ -348,7 +355,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
        req_len = min_packet_len;
-       req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+       req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
 
        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented
@@ -381,10 +388,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
        if (new_len > 0)
                tot_changes = new_len / batadv_tt_len(1);
 
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
-       atomic_set(&bat_priv->tt_local_changes, 0);
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       atomic_set(&bat_priv->tt.local_changes, 0);
 
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (count < tot_changes) {
                        memcpy(tt_buff + batadv_tt_len(count),
@@ -394,25 +401,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
                list_del(&entry->list);
                kfree(entry);
        }
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
        /* Keep the buffer for possible tt_request */
-       spin_lock_bh(&bat_priv->tt_buff_lock);
-       kfree(bat_priv->tt_buff);
-       bat_priv->tt_buff_len = 0;
-       bat_priv->tt_buff = NULL;
+       spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+       kfree(bat_priv->tt.last_changeset);
+       bat_priv->tt.last_changeset_len = 0;
+       bat_priv->tt.last_changeset = NULL;
        /* check whether this new OGM has no changes due to size problems */
        if (new_len > 0) {
                /* if kmalloc() fails we will reply with the full table
                 * instead of providing the diff
                 */
-               bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
-               if (bat_priv->tt_buff) {
-                       memcpy(bat_priv->tt_buff, tt_buff, new_len);
-                       bat_priv->tt_buff_len = new_len;
+               bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+               if (bat_priv->tt.last_changeset) {
+                       memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
+                       bat_priv->tt.last_changeset_len = new_len;
                }
        }
-       spin_unlock_bh(&bat_priv->tt_buff_lock);
+       spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
        return count;
 }
@@ -421,7 +428,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_hard_iface *primary_if;
        struct hlist_node *node;
@@ -446,7 +453,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
        seq_printf(seq,
                   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-                  net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
+                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -544,7 +551,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 
 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
@@ -570,10 +577,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        uint32_t i;
 
-       if (!bat_priv->tt_local_hash)
+       if (!bat_priv->tt.local_hash)
                return;
 
-       hash = bat_priv->tt_local_hash;
+       hash = bat_priv->tt.local_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -593,17 +600,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 
        batadv_hash_destroy(hash);
 
-       bat_priv->tt_local_hash = NULL;
+       bat_priv->tt.local_hash = NULL;
 }
 
 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
 {
-       if (bat_priv->tt_global_hash)
+       if (bat_priv->tt.global_hash)
                return 0;
 
-       bat_priv->tt_global_hash = batadv_hash_new(1024);
+       bat_priv->tt.global_hash = batadv_hash_new(1024);
 
-       if (!bat_priv->tt_global_hash)
+       if (!bat_priv->tt.global_hash)
                return -ENOMEM;
 
        return 0;
@@ -613,62 +620,99 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_change_node *entry, *safe;
 
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
 
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                list_del(&entry->list);
                kfree(entry);
        }
 
-       atomic_set(&bat_priv->tt_local_changes, 0);
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
-/* find out if an orig_node is already in the list of a tt_global_entry.
- * returns 1 if found, 0 otherwise
+/* retrieves the orig_tt_list_entry belonging to orig_node from the
+ * batadv_tt_global_entry list
+ *
+ * returns it with an increased refcounter, NULL if not found
  */
-static bool
-batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
-                               const struct batadv_orig_node *orig_node)
+static struct batadv_tt_orig_list_entry *
+batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+                                const struct batadv_orig_node *orig_node)
 {
-       struct batadv_tt_orig_list_entry *tmp_orig_entry;
+       struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
        const struct hlist_head *head;
        struct hlist_node *node;
-       bool found = false;
 
        rcu_read_lock();
        head = &entry->orig_list;
        hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
-               if (tmp_orig_entry->orig_node == orig_node) {
-                       found = true;
-                       break;
-               }
+               if (tmp_orig_entry->orig_node != orig_node)
+                       continue;
+               if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
+                       continue;
+
+               orig_entry = tmp_orig_entry;
+               break;
        }
        rcu_read_unlock();
+
+       return orig_entry;
+}
+
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+                               const struct batadv_orig_node *orig_node)
+{
+       struct batadv_tt_orig_list_entry *orig_entry;
+       bool found = false;
+
+       orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+       if (orig_entry) {
+               found = true;
+               batadv_tt_orig_list_entry_free_ref(orig_entry);
+       }
+
        return found;
 }
 
 static void
-batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
                                struct batadv_orig_node *orig_node, int ttvn)
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
+       orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+       if (orig_entry) {
+               /* refresh the ttvn: the current value could be a bogus one that
+                * was added during a "temporary client detection"
+                */
+               orig_entry->ttvn = ttvn;
+               goto out;
+       }
+
        orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
        if (!orig_entry)
-               return;
+               goto out;
 
        INIT_HLIST_NODE(&orig_entry->list);
        atomic_inc(&orig_node->refcount);
        atomic_inc(&orig_node->tt_size);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
+       atomic_set(&orig_entry->refcount, 2);
 
-       spin_lock_bh(&tt_global_entry->list_lock);
+       spin_lock_bh(&tt_global->list_lock);
        hlist_add_head_rcu(&orig_entry->list,
-                          &tt_global_entry->orig_list);
-       spin_unlock_bh(&tt_global_entry->list_lock);
+                          &tt_global->orig_list);
+       spin_unlock_bh(&tt_global->list_lock);
+out:
+       if (orig_entry)
+               batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
 
 /* caller must hold orig_node refcount */
@@ -695,11 +739,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                common->flags = flags;
                tt_global_entry->roam_at = 0;
                atomic_set(&common->refcount, 2);
+               common->added_at = jiffies;
 
                INIT_HLIST_HEAD(&tt_global_entry->orig_list);
                spin_lock_init(&tt_global_entry->list_lock);
 
-               hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+               hash_added = batadv_hash_add(bat_priv->tt.global_hash,
                                             batadv_compare_tt,
                                             batadv_choose_orig, common,
                                             &common->hash_entry);
@@ -709,11 +754,20 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                        batadv_tt_global_entry_free_ref(tt_global_entry);
                        goto out_remove;
                }
-
-               batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
-                                               ttvn);
        } else {
-               /* there is already a global entry, use this one. */
+               /* If there is already a global entry, we can use this one for
+                * our processing.
+                * But if we are trying to add a temporary client we can exit
+                * directly because the temporary information should never
+                * override any already known client state (whatever it is)
+                */
+               if (flags & BATADV_TT_CLIENT_TEMP)
+                       goto out;
+
+               /* if the client was temporary added before receiving the first
+                * OGM announcing it, we have to clear the TEMP flag
+                */
+               tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
@@ -727,12 +781,9 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                        tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
                        tt_global_entry->roam_at = 0;
                }
-
-               if (!batadv_tt_global_entry_has_orig(tt_global_entry,
-                                                    orig_node))
-                       batadv_tt_global_add_orig_entry(tt_global_entry,
-                                                       orig_node, ttvn);
        }
+       /* add the new orig_entry (if needed) or update it */
+       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new global tt entry: %pM (via %pM)\n",
@@ -771,11 +822,12 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
        hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                flags = tt_common_entry->flags;
                last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
                           tt_global_entry->common.addr, orig_entry->ttvn,
                           orig_entry->orig_node->orig, last_ttvn,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-                          (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+                          (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+                          (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
        }
 }
 
@@ -783,7 +835,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global;
        struct batadv_hard_iface *primary_if;
@@ -884,7 +936,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
                   "Deleting global tt entry %pM: %s\n",
                   tt_global_entry->common.addr, message);
 
-       batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
                           batadv_choose_orig, tt_global_entry->common.addr);
        batadv_tt_global_entry_free_ref(tt_global_entry);
 
@@ -995,7 +1047,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        struct batadv_tt_global_entry *tt_global;
        struct batadv_tt_common_entry *tt_common_entry;
        uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_node *node, *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1030,49 +1082,63 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        orig_node->tt_initialised = false;
 }
 
-static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
-                                            struct hlist_head *head)
+static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+                                     char **msg)
 {
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_global_entry *tt_global_entry;
-       struct hlist_node *node, *node_tmp;
-
-       hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
-                                 hash_entry) {
-               tt_global_entry = container_of(tt_common_entry,
-                                              struct batadv_tt_global_entry,
-                                              common);
-               if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
-                       continue;
-               if (!batadv_has_timed_out(tt_global_entry->roam_at,
-                                         BATADV_TT_CLIENT_ROAM_TIMEOUT))
-                       continue;
+       bool purge = false;
+       unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
+       unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
 
-               batadv_dbg(BATADV_DBG_TT, bat_priv,
-                          "Deleting global tt entry (%pM): Roaming timeout\n",
-                          tt_global_entry->common.addr);
+       if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
+           batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
+               purge = true;
+               *msg = "Roaming timeout\n";
+       }
 
-               hlist_del_rcu(node);
-               batadv_tt_global_entry_free_ref(tt_global_entry);
+       if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
+           batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
+               purge = true;
+               *msg = "Temporary client timeout\n";
        }
+
+       return purge;
 }
 
-static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_head *head;
+       struct hlist_node *node, *node_tmp;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
+       char *msg = NULL;
+       struct batadv_tt_common_entry *tt_common;
+       struct batadv_tt_global_entry *tt_global;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               batadv_tt_global_roam_purge_list(bat_priv, head);
+               hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+                                         hash_entry) {
+                       tt_global = container_of(tt_common,
+                                                struct batadv_tt_global_entry,
+                                                common);
+
+                       if (!batadv_tt_global_to_purge(tt_global, &msg))
+                               continue;
+
+                       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                                  "Deleting global tt entry (%pM): %s\n",
+                                  tt_global->common.addr, msg);
+
+                       hlist_del_rcu(node);
+
+                       batadv_tt_global_entry_free_ref(tt_global);
+               }
                spin_unlock_bh(list_lock);
        }
-
 }
 
 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
@@ -1085,10 +1151,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        uint32_t i;
 
-       if (!bat_priv->tt_global_hash)
+       if (!bat_priv->tt.global_hash)
                return;
 
-       hash = bat_priv->tt_global_hash;
+       hash = bat_priv->tt.global_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1108,7 +1174,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 
        batadv_hash_destroy(hash);
 
-       bat_priv->tt_global_hash = NULL;
+       bat_priv->tt.global_hash = NULL;
 }
 
 static bool
@@ -1187,7 +1253,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                     struct batadv_orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_node *node;
@@ -1210,6 +1276,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                         */
                        if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
                                continue;
+                       /* Temporary clients have not been announced yet, so
+                        * they have to be skipped while computing the global
+                        * crc
+                        */
+                       if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
+                               continue;
 
                        /* find out if this global entry is announced by this
                         * originator
@@ -1234,7 +1306,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 {
        uint16_t total = 0, total_one;
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct hlist_node *node;
        struct hlist_head *head;
@@ -1267,14 +1339,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                list_del(&node->list);
                kfree(node);
        }
 
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1304,15 +1376,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
                        list_del(&node->list);
                        kfree(node);
                }
        }
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 /* returns the pointer to the new tt_req_node struct if no request
@@ -1324,8 +1396,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
                if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
                    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
                                          BATADV_TT_REQUEST_TIMEOUT))
@@ -1339,9 +1411,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
        memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
        tt_req_node->issued_at = jiffies;
 
-       list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+       list_add(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
        return tt_req_node;
 }
 
@@ -1363,7 +1435,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
        const struct batadv_tt_global_entry *tt_global_entry;
        const struct batadv_orig_node *orig_node = data_ptr;
 
-       if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
+       if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
+           tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
                return 0;
 
        tt_global_entry = container_of(tt_common_entry,
@@ -1507,9 +1580,9 @@ out:
        if (ret)
                kfree_skb(skb);
        if (ret && tt_req_node) {
-               spin_lock_bh(&bat_priv->tt_req_list_lock);
+               spin_lock_bh(&bat_priv->tt.req_list_lock);
                list_del(&tt_req_node->list);
-               spin_unlock_bh(&bat_priv->tt_req_list_lock);
+               spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
        return ret;
@@ -1530,6 +1603,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
        uint16_t tt_len, tt_tot;
        struct sk_buff *skb = NULL;
        struct batadv_tt_query_packet *tt_response;
+       uint8_t *packet_pos;
        size_t len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1583,8 +1657,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                        goto unlock;
 
                skb_reserve(skb, ETH_HLEN);
-               tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-                                                                      len);
+               packet_pos = skb_put(skb, len);
+               tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
                tt_response->tt_data = htons(tt_tot);
 
@@ -1600,7 +1674,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
 
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt_global_hash,
+                                                   bat_priv->tt.global_hash,
                                                    primary_if,
                                                    batadv_tt_global_valid,
                                                    req_dst_orig_node);
@@ -1663,6 +1737,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        uint16_t tt_len, tt_tot;
        struct sk_buff *skb = NULL;
        struct batadv_tt_query_packet *tt_response;
+       uint8_t *packet_pos;
        size_t len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1671,7 +1746,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 
-       my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+       my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        req_ttvn = tt_request->ttvn;
 
        orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1690,7 +1765,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         * is too big send the whole local translation table
         */
        if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
-           !bat_priv->tt_buff)
+           !bat_priv->tt.last_changeset)
                full_table = true;
        else
                full_table = false;
@@ -1699,8 +1774,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         * I'll send only one packet with as much TT entries as I can
         */
        if (!full_table) {
-               spin_lock_bh(&bat_priv->tt_buff_lock);
-               tt_len = bat_priv->tt_buff_len;
+               spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+               tt_len = bat_priv->tt.last_changeset_len;
                tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
                len = sizeof(*tt_response) + tt_len;
@@ -1709,22 +1784,22 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                        goto unlock;
 
                skb_reserve(skb, ETH_HLEN);
-               tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-                                                                      len);
+               packet_pos = skb_put(skb, len);
+               tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
                tt_response->tt_data = htons(tt_tot);
 
                tt_buff = skb->data + sizeof(*tt_response);
-               memcpy(tt_buff, bat_priv->tt_buff,
-                      bat_priv->tt_buff_len);
-               spin_unlock_bh(&bat_priv->tt_buff_lock);
+               memcpy(tt_buff, bat_priv->tt.last_changeset,
+                      bat_priv->tt.last_changeset_len);
+               spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+               tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
                tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+               ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt_local_hash,
+                                                   bat_priv->tt.local_hash,
                                                    primary_if,
                                                    batadv_tt_local_valid_entry,
                                                    NULL);
@@ -1756,7 +1831,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        goto out;
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_buff_lock);
+       spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
@@ -1909,14 +1984,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
        }
 
        /* Delete the tt_req_node from pending tt_requests list */
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (!batadv_compare_eth(node->addr, tt_response->src))
                        continue;
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
        /* Recalculate the CRC for this orig_node and store it */
        orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1950,22 +2025,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
 
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                list_del(&node->list);
                kfree(node);
        }
 
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                if (!batadv_has_timed_out(node->first_time,
                                          BATADV_ROAMING_MAX_TIME))
                        continue;
@@ -1973,7 +2048,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 /* This function checks whether the client already reached the
@@ -1988,11 +2063,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
        struct batadv_tt_roam_node *tt_roam_node;
        bool ret = false;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
        /* The new tt_req will be issued only if I'm not waiting for a
         * reply from the same orig_node yet
         */
-       list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+       list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
                if (!batadv_compare_eth(tt_roam_node->addr, client))
                        continue;
 
@@ -2017,12 +2092,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
                           BATADV_ROAMING_MAX_COUNT - 1);
                memcpy(tt_roam_node->addr, client, ETH_ALEN);
 
-               list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+               list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
                ret = true;
        }
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
        return ret;
 }
 
@@ -2086,13 +2161,15 @@ out:
 static void batadv_tt_purge(struct work_struct *work)
 {
        struct delayed_work *delayed_work;
+       struct batadv_priv_tt *priv_tt;
        struct batadv_priv *bat_priv;
 
        delayed_work = container_of(work, struct delayed_work, work);
-       bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
+       priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+       bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
        batadv_tt_local_purge(bat_priv);
-       batadv_tt_global_roam_purge(bat_priv);
+       batadv_tt_global_purge(bat_priv);
        batadv_tt_req_purge(bat_priv);
        batadv_tt_roam_purge(bat_priv);
 
@@ -2101,7 +2178,7 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
-       cancel_delayed_work_sync(&bat_priv->tt_work);
+       cancel_delayed_work_sync(&bat_priv->tt.work);
 
        batadv_tt_local_table_free(bat_priv);
        batadv_tt_global_table_free(bat_priv);
@@ -2109,7 +2186,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
        batadv_tt_changes_list_free(bat_priv);
        batadv_tt_roam_list_free(bat_priv);
 
-       kfree(bat_priv->tt_buff);
+       kfree(bat_priv->tt.last_changeset);
 }
 
 /* This function will enable or disable the specified flags for all the entries
@@ -2153,7 +2230,7 @@ out:
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
        struct hlist_node *node, *node_tmp;
@@ -2178,7 +2255,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                   "Deleting local tt entry (%pM): pending\n",
                                   tt_common->addr);
 
-                       atomic_dec(&bat_priv->num_local_tt);
+                       atomic_dec(&bat_priv->tt.local_entry_num);
                        hlist_del_rcu(node);
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
@@ -2196,26 +2273,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
 {
        uint16_t changed_num = 0;
 
-       if (atomic_read(&bat_priv->tt_local_changes) < 1)
+       if (atomic_read(&bat_priv->tt.local_changes) < 1)
                return -ENOENT;
 
-       changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+       changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
                                          BATADV_TT_CLIENT_NEW, false);
 
        /* all reset entries have to be counted as local entries */
-       atomic_add(changed_num, &bat_priv->num_local_tt);
+       atomic_add(changed_num, &bat_priv->tt.local_entry_num);
        batadv_tt_local_purge_pending_clients(bat_priv);
-       bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
+       bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
-       atomic_inc(&bat_priv->ttvn);
+       atomic_inc(&bat_priv->tt.vn);
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Local changes committed, updating to ttvn %u\n",
-                  (uint8_t)atomic_read(&bat_priv->ttvn));
-       bat_priv->tt_poss_change = false;
+                  (uint8_t)atomic_read(&bat_priv->tt.vn));
+       bat_priv->tt.poss_change = false;
 
        /* reset the sending counter */
-       atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
 
        return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
                                           packet_buff_len, packet_min_len);
@@ -2235,7 +2312,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
 
        /* if the changes have been sent often enough */
        if ((tt_num_changes < 0) &&
-           (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+           (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
                batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
                                              packet_min_len, packet_min_len);
                tt_num_changes = 0;
@@ -2366,3 +2443,22 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
 out:
        return ret;
 }
+
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig_node,
+                                         const unsigned char *addr)
+{
+       bool ret = false;
+
+       if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+                                 BATADV_TT_CLIENT_TEMP,
+                                 atomic_read(&orig_node->last_ttvn)))
+               goto out;
+
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Added temporary global client (addr: %pM orig: %pM)\n",
+                  addr, orig_node->orig);
+       ret = true;
+out:
+       return ret;
+}
index ffa87355096b3396bd0ffcf8e7cbba7fd9d86581..811fffd4760c3678a60994e027896277289e751a 100644 (file)
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
                          int packet_min_len);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
                                        uint8_t *addr);
-
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig_node,
+                                         const unsigned char *addr);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
index 12635fd2c3d3fe68985d53a00e10d684045bfc32..2ed82caacdca4bfa0cf429d74a7311523874b4c2 100644 (file)
@@ -145,6 +145,11 @@ struct batadv_bcast_duplist_entry {
 #endif
 
 enum batadv_counters {
+       BATADV_CNT_TX,
+       BATADV_CNT_TX_BYTES,
+       BATADV_CNT_TX_DROPPED,
+       BATADV_CNT_RX,
+       BATADV_CNT_RX_BYTES,
        BATADV_CNT_FORWARD,
        BATADV_CNT_FORWARD_BYTES,
        BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@ enum batadv_counters {
        BATADV_CNT_NUM,
 };
 
+/**
+ * struct batadv_priv_tt - per mesh interface translation table data
+ * @vn: translation table version number
+ * @local_changes: changes registered in an originator interval
+ * @poss_change: Detect an ongoing roaming phase. If true, then this node
+ *  received a roaming_adv and has to inspect every packet directed to it to
+ *  check whether it still is the true destination or not. This flag will be
+ *  reset to false as soon as the this node's ttvn is increased
+ * @changes_list: tracks tt local changes within an originator interval
+ * @req_list: list of pending tt_requests
+ * @local_crc: Checksum of the local table, recomputed before sending a new OGM
+ */
+struct batadv_priv_tt {
+       atomic_t vn;
+       atomic_t ogm_append_cnt;
+       atomic_t local_changes;
+       bool poss_change;
+       struct list_head changes_list;
+       struct batadv_hashtable *local_hash;
+       struct batadv_hashtable *global_hash;
+       struct list_head req_list;
+       struct list_head roam_list;
+       spinlock_t changes_list_lock; /* protects changes */
+       spinlock_t req_list_lock; /* protects req_list */
+       spinlock_t roam_list_lock; /* protects roam_list */
+       atomic_t local_entry_num;
+       uint16_t local_crc;
+       unsigned char *last_changeset;
+       int16_t last_changeset_len;
+       spinlock_t last_changeset_lock; /* protects last_changeset */
+       struct delayed_work work;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct batadv_priv_bla {
+       atomic_t num_requests; /* number of bla requests in flight */
+       struct batadv_hashtable *claim_hash;
+       struct batadv_hashtable *backbone_hash;
+       struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+       int bcast_duplist_curr;
+       struct batadv_bla_claim_dst claim_dest;
+       struct delayed_work work;
+};
+#endif
+
+struct batadv_priv_gw {
+       struct hlist_head list;
+       spinlock_t list_lock; /* protects gw_list and curr_gw */
+       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+       atomic_t reselect;
+};
+
+struct batadv_priv_vis {
+       struct list_head send_list;
+       struct batadv_hashtable *hash;
+       spinlock_t hash_lock; /* protects hash */
+       spinlock_t list_lock; /* protects info::recv_list */
+       struct delayed_work work;
+       struct batadv_vis_info *my_info;
+};
+
 struct batadv_priv {
        atomic_t mesh_state;
        struct net_device_stats stats;
@@ -179,64 +245,24 @@ struct batadv_priv {
        atomic_t bcast_seqno;
        atomic_t bcast_queue_left;
        atomic_t batman_queue_left;
-       atomic_t ttvn; /* translation table version number */
-       atomic_t tt_ogm_append_cnt;
-       atomic_t tt_local_changes; /* changes registered in a OGM interval */
-       atomic_t bla_num_requests; /* number of bla requests in flight */
-       /* The tt_poss_change flag is used to detect an ongoing roaming phase.
-        * If true, then I received a Roaming_adv and I have to inspect every
-        * packet directed to me to check whether I am still the true
-        * destination or not. This flag will be reset to false as soon as I
-        * increase my TTVN
-        */
-       bool tt_poss_change;
        char num_ifaces;
        struct batadv_debug_log *debug_log;
        struct kobject *mesh_obj;
        struct dentry *debug_dir;
        struct hlist_head forw_bat_list;
        struct hlist_head forw_bcast_list;
-       struct hlist_head gw_list;
-       struct list_head tt_changes_list; /* tracks changes in a OGM int */
-       struct list_head vis_send_list;
        struct batadv_hashtable *orig_hash;
-       struct batadv_hashtable *tt_local_hash;
-       struct batadv_hashtable *tt_global_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-       struct batadv_hashtable *claim_hash;
-       struct batadv_hashtable *backbone_hash;
-#endif
-       struct list_head tt_req_list; /* list of pending tt_requests */
-       struct list_head tt_roam_list;
-       struct batadv_hashtable *vis_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-       struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
-       int bcast_duplist_curr;
-       struct batadv_bla_claim_dst claim_dest;
-#endif
        spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
        spinlock_t forw_bcast_list_lock; /* protects  */
-       spinlock_t tt_changes_list_lock; /* protects tt_changes */
-       spinlock_t tt_req_list_lock; /* protects tt_req_list */
-       spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
-       spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
-       spinlock_t vis_hash_lock; /* protects vis_hash */
-       spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-       atomic_t num_local_tt;
-       /* Checksum of the local table, recomputed before sending a new OGM */
-       uint16_t tt_crc;
-       unsigned char *tt_buff;
-       int16_t tt_buff_len;
-       spinlock_t tt_buff_lock; /* protects tt_buff */
-       struct delayed_work tt_work;
        struct delayed_work orig_work;
-       struct delayed_work vis_work;
-       struct delayed_work bla_work;
-       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
-       atomic_t gw_reselect;
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
-       struct batadv_vis_info *my_vis_info;
        struct batadv_algo_ops *bat_algo_ops;
+#ifdef CONFIG_BATMAN_ADV_BLA
+       struct batadv_priv_bla bla;
+#endif
+       struct batadv_priv_gw gw;
+       struct batadv_priv_tt tt;
+       struct batadv_priv_vis vis;
 };
 
 struct batadv_socket_client {
@@ -258,6 +284,7 @@ struct batadv_tt_common_entry {
        uint8_t addr[ETH_ALEN];
        struct hlist_node hash_entry;
        uint16_t flags;
+       unsigned long added_at;
        atomic_t refcount;
        struct rcu_head rcu;
 };
@@ -277,6 +304,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
        struct batadv_orig_node *orig_node;
        uint8_t ttvn;
+       atomic_t refcount;
        struct rcu_head rcu;
        struct hlist_node list;
 };
index 00164645b3f74763ce1ca33cde11ea598c279107..f39723281ca1f7cd96dd9f7535a2bea8fbc5aa07 100644 (file)
@@ -39,6 +39,7 @@ batadv_frag_merge_packet(struct list_head *head,
        struct batadv_unicast_packet *unicast_packet;
        int hdr_len = sizeof(*unicast_packet);
        int uni_diff = sizeof(*up) - hdr_len;
+       uint8_t *packet_pos;
 
        up = (struct batadv_unicast_frag_packet *)skb->data;
        /* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@ batadv_frag_merge_packet(struct list_head *head,
        kfree_skb(tmp_skb);
 
        memmove(skb->data + uni_diff, skb->data, hdr_len);
-       unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
-                                                                 uni_diff);
+       packet_pos = skb_pull(skb, uni_diff);
+       unicast_packet = (struct batadv_unicast_packet *)packet_pos;
        unicast_packet->header.packet_type = BATADV_UNICAST;
 
        return skb;
@@ -121,6 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
 {
        struct batadv_frag_packet_list_entry *tfp;
        struct batadv_unicast_frag_packet *tmp_up = NULL;
+       int is_head_tmp, is_head;
        uint16_t search_seqno;
 
        if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@ batadv_frag_search_packet(struct list_head *head,
        else
                search_seqno = ntohs(up->seqno)-1;
 
+       is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
+
        list_for_each_entry(tfp, head, list) {
 
                if (!tfp->skb)
@@ -139,9 +143,8 @@ batadv_frag_search_packet(struct list_head *head,
                tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
 
                if (tfp->seqno == search_seqno) {
-
-                       if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
-                           (up->flags & BATADV_UNI_FRAG_HEAD))
+                       is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
+                       if (is_head_tmp != is_head)
                                return tfp;
                        else
                                goto mov_tail;
@@ -334,8 +337,7 @@ find_router:
        /* copy the destination for faster routing */
        memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
        /* set the destination tt version number */
-       unicast_packet->ttvn =
-               (uint8_t)atomic_read(&orig_node->last_ttvn);
+       unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 
        /* inform the destination node that we are still missing a correct route
         * for this client. The destination will receive this packet and will
index 2a2ea06814695f4356f38de38c83cd3465184630..5abd1454fb07d3025184de6731afad654176ae02 100644 (file)
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref)
        bat_priv = info->bat_priv;
 
        list_del_init(&info->send_list);
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
 
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
        kfree_skb(info->skb_packet);
        kfree(info);
 }
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
 static struct batadv_vis_info *
 batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
        struct hlist_head *head;
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        uint32_t i;
        int ret = 0;
        int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
        if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
                goto out;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                batadv_vis_seq_print_text_bucket(seq, head);
        }
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 
 out:
        if (primary_if)
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv,
 {
        if (list_empty(&info->send_list)) {
                kref_get(&info->refcount);
-               list_add_tail(&info->send_list, &bat_priv->vis_send_list);
+               list_add_tail(&info->send_list, &bat_priv->vis.send_list);
        }
 }
 
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv,
                return;
 
        memcpy(entry->mac, mac, ETH_ALEN);
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_add_tail(&entry->list, recv_list);
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
 }
 
 /* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
 {
        const struct batadv_recvlist_node *entry;
 
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_for_each_entry(entry, recv_list, list) {
                if (batadv_compare_eth(entry->mac, mac)) {
-                       spin_unlock_bh(&bat_priv->vis_list_lock);
+                       spin_unlock_bh(&bat_priv->vis.list_lock);
                        return 1;
                }
        }
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
        return 0;
 }
 
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
 
        *is_new = 0;
        /* sanity check */
-       if (!bat_priv->vis_hash)
+       if (!bat_priv->vis.hash)
                return NULL;
 
        /* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
                        }
                }
                /* remove old entry */
-               batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+               batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
                                   batadv_vis_info_choose, old_info);
                batadv_send_list_del(old_info);
                kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
        batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
 
        /* try to add it */
-       hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
                                     batadv_vis_info_choose, info,
                                     &info->hash_entry);
        if (hash_added != 0) {
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
 
        make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
                                 &is_new, make_broadcast);
        if (!info)
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
        if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
                batadv_send_list_add(bat_priv, info);
 end:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
            batadv_is_my_mac(vis_packet->target_orig))
                are_target = 1;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
                                 &is_new, are_target);
 
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
        }
 
 end:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *router;
-       struct batadv_vis_info *info = bat_priv->my_vis_info;
+       struct batadv_vis_info *info = bat_priv->vis.my_info;
        struct batadv_vis_packet *packet;
        struct batadv_vis_info_entry *entry;
        struct batadv_tt_common_entry *tt_common_entry;
+       uint8_t *packet_pos;
        int best_tq = -1;
        uint32_t i;
 
@@ -618,8 +619,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
                                goto next;
 
                        /* fill one entry into buffer. */
-                       entry = (struct batadv_vis_info_entry *)
-                                     skb_put(info->skb_packet, sizeof(*entry));
+                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+                       entry = (struct batadv_vis_info_entry *)packet_pos;
                        memcpy(entry->src,
                               router->if_incoming->net_dev->dev_addr,
                               ETH_ALEN);
@@ -636,7 +637,7 @@ next:
                rcu_read_unlock();
        }
 
-       hash = bat_priv->tt_local_hash;
+       hash = bat_priv->tt.local_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -644,9 +645,8 @@ next:
                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node, head,
                                         hash_entry) {
-                       entry = (struct batadv_vis_info_entry *)
-                                       skb_put(info->skb_packet,
-                                               sizeof(*entry));
+                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+                       entry = (struct batadv_vis_info_entry *)packet_pos;
                        memset(entry->src, 0, ETH_ALEN);
                        memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@ unlock:
 static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 {
        uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        struct batadv_vis_info *info;
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
                hlist_for_each_entry_safe(info, node, node_tmp,
                                          head, hash_entry) {
                        /* never purge own data. */
-                       if (info == bat_priv->my_vis_info)
+                       if (info == bat_priv->vis.my_info)
                                continue;
 
                        if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@ out:
 /* called from timer; send (and maybe generate) vis packet. */
 static void batadv_send_vis_packets(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
+       struct batadv_priv_vis *priv_vis;
        struct batadv_vis_info *info;
 
-       bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       delayed_work = container_of(work, struct delayed_work, work);
+       priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
+       bat_priv = container_of(priv_vis, struct batadv_priv, vis);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        batadv_purge_vis_packets(bat_priv);
 
        if (batadv_generate_vis_packet(bat_priv) == 0) {
                /* schedule if generation was successful */
-               batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
+               batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
        }
 
-       while (!list_empty(&bat_priv->vis_send_list)) {
-               info = list_first_entry(&bat_priv->vis_send_list,
+       while (!list_empty(&bat_priv->vis.send_list)) {
+               info = list_first_entry(&bat_priv->vis.send_list,
                                        typeof(*info), send_list);
 
                kref_get(&info->refcount);
-               spin_unlock_bh(&bat_priv->vis_hash_lock);
+               spin_unlock_bh(&bat_priv->vis.hash_lock);
 
                batadv_send_vis_packet(bat_priv, info);
 
-               spin_lock_bh(&bat_priv->vis_hash_lock);
+               spin_lock_bh(&bat_priv->vis.hash_lock);
                batadv_send_list_del(info);
                kref_put(&info->refcount, batadv_free_info);
        }
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_start_vis_timer(bat_priv);
 }
 
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        unsigned long first_seen;
        struct sk_buff *tmp_skb;
 
-       if (bat_priv->vis_hash)
+       if (bat_priv->vis.hash)
                return 0;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
 
-       bat_priv->vis_hash = batadv_hash_new(256);
-       if (!bat_priv->vis_hash) {
+       bat_priv->vis.hash = batadv_hash_new(256);
+       if (!bat_priv->vis.hash) {
                pr_err("Can't initialize vis_hash\n");
                goto err;
        }
 
-       bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-       if (!bat_priv->my_vis_info)
+       bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+       if (!bat_priv->vis.my_info)
                goto err;
 
        len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-       bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
-       if (!bat_priv->my_vis_info->skb_packet)
+       bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
+       if (!bat_priv->vis.my_info->skb_packet)
                goto free_info;
 
-       skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
-       tmp_skb = bat_priv->my_vis_info->skb_packet;
+       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
+       tmp_skb = bat_priv->vis.my_info->skb_packet;
        packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
        /* prefill the vis info */
        first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-       bat_priv->my_vis_info->first_seen = first_seen;
-       INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
-       INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
-       kref_init(&bat_priv->my_vis_info->refcount);
-       bat_priv->my_vis_info->bat_priv = bat_priv;
+       bat_priv->vis.my_info->first_seen = first_seen;
+       INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
+       INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
+       kref_init(&bat_priv->vis.my_info->refcount);
+       bat_priv->vis.my_info->bat_priv = bat_priv;
        packet->header.version = BATADV_COMPAT_VERSION;
        packet->header.packet_type = BATADV_VIS;
        packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        packet->reserved = 0;
        packet->entries = 0;
 
-       INIT_LIST_HEAD(&bat_priv->vis_send_list);
+       INIT_LIST_HEAD(&bat_priv->vis.send_list);
 
-       hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
                                     batadv_vis_info_choose,
-                                    bat_priv->my_vis_info,
-                                    &bat_priv->my_vis_info->hash_entry);
+                                    bat_priv->vis.my_info,
+                                    &bat_priv->vis.my_info->hash_entry);
        if (hash_added != 0) {
                pr_err("Can't add own vis packet into hash\n");
                /* not in hash, need to remove it manually. */
-               kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
+               kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
                goto err;
        }
 
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_start_vis_timer(bat_priv);
        return 0;
 
 free_info:
-       kfree(bat_priv->my_vis_info);
-       bat_priv->my_vis_info = NULL;
+       kfree(bat_priv->vis.my_info);
+       bat_priv->vis.my_info = NULL;
 err:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_vis_quit(bat_priv);
        return -ENOMEM;
 }
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg)
 /* shutdown vis-server */
 void batadv_vis_quit(struct batadv_priv *bat_priv)
 {
-       if (!bat_priv->vis_hash)
+       if (!bat_priv->vis.hash)
                return;
 
-       cancel_delayed_work_sync(&bat_priv->vis_work);
+       cancel_delayed_work_sync(&bat_priv->vis.work);
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        /* properly remove, kill timers ... */
-       batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
-       bat_priv->vis_hash = NULL;
-       bat_priv->my_vis_info = NULL;
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
+       bat_priv->vis.hash = NULL;
+       bat_priv->vis.my_info = NULL;
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* schedule packets for (re)transmission */
 static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+       INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
                           msecs_to_jiffies(BATADV_VIS_INTERVAL));
 }
index 84e716ed8963af8df1053299b433bdb60f9d7fa3..873282fa86dadce0671c1c928a44903229462543 100644 (file)
@@ -20,7 +20,7 @@
 #ifndef _NET_BATMAN_ADV_VIS_H_
 #define _NET_BATMAN_ADV_VIS_H_
 
-/* timeout of vis packets in miliseconds */
+/* timeout of vis packets in milliseconds */
 #define BATADV_VIS_TIMEOUT             200000
 
 int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
index 4ff0bf3ba9a516bcb99e6a8165c38b81419a28c5..0760d1fed6f08bb13404a11622b83cf3dcf02484 100644 (file)
@@ -316,7 +316,7 @@ send_rsp:
 static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
                               struct a2mp_cmd *hdr)
 {
-       BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+       BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
 
        skb_pull(skb, le16_to_cpu(hdr->len));
        return 0;
@@ -325,17 +325,19 @@ static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
 /* Handle A2MP signalling */
 static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       struct a2mp_cmd *hdr = (void *) skb->data;
+       struct a2mp_cmd *hdr;
        struct amp_mgr *mgr = chan->data;
        int err = 0;
 
        amp_mgr_get(mgr);
 
        while (skb->len >= sizeof(*hdr)) {
-               struct a2mp_cmd *hdr = (void *) skb->data;
-               u16 len = le16_to_cpu(hdr->len);
+               u16 len;
 
-               BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+               hdr = (void *) skb->data;
+               len = le16_to_cpu(hdr->len);
+
+               BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
 
                skb_pull(skb, sizeof(*hdr));
 
@@ -393,7 +395,9 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 
        if (err) {
                struct a2mp_cmd_rej rej;
+
                rej.reason = __constant_cpu_to_le16(0);
+               hdr = (void *) skb->data;
 
                BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
 
@@ -412,7 +416,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 
 static void a2mp_chan_close_cb(struct l2cap_chan *chan)
 {
-       l2cap_chan_destroy(chan);
+       l2cap_chan_put(chan);
 }
 
 static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
index f7db5792ec648d3078d047cb46d0b269a68028bf..9d49ee6d72190c8f9b727ed98ee1ba9f0c256f48 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/ioctls.h>
 
 #include <net/bluetooth/bluetooth.h>
+#include <linux/proc_fs.h>
 
 #define VERSION "2.16"
 
@@ -532,6 +533,144 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
 
+#ifdef CONFIG_PROC_FS
+struct bt_seq_state {
+       struct bt_sock_list *l;
+};
+
+static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(seq->private->l->lock)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       read_lock(&l->lock);
+       return seq_hlist_start_head(&l->head, *pos);
+}
+
+static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       return seq_hlist_next(v, &l->head, pos);
+}
+
+static void bt_seq_stop(struct seq_file *seq, void *v)
+       __releases(seq->private->l->lock)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       read_unlock(&l->lock);
+}
+
+static int bt_seq_show(struct seq_file *seq, void *v)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+       bdaddr_t src_baswapped, dst_baswapped;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
+
+               if (l->custom_seq_show) {
+                       seq_putc(seq, ' ');
+                       l->custom_seq_show(seq, v);
+               }
+
+               seq_putc(seq, '\n');
+       } else {
+               struct sock *sk = sk_entry(v);
+               struct bt_sock *bt = bt_sk(sk);
+               baswap(&src_baswapped, &bt->src);
+               baswap(&dst_baswapped, &bt->dst);
+
+               seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu",
+                          sk,
+                          atomic_read(&sk->sk_refcnt),
+                          sk_rmem_alloc_get(sk),
+                          sk_wmem_alloc_get(sk),
+                          sock_i_uid(sk),
+                          sock_i_ino(sk),
+                          &src_baswapped,
+                          &dst_baswapped,
+                          bt->parent ? sock_i_ino(bt->parent) : 0LU);
+
+               if (l->custom_seq_show) {
+                       seq_putc(seq, ' ');
+                       l->custom_seq_show(seq, v);
+               }
+
+               seq_putc(seq, '\n');
+       }
+       return 0;
+}
+
+static struct seq_operations bt_seq_ops = {
+       .start = bt_seq_start,
+       .next  = bt_seq_next,
+       .stop  = bt_seq_stop,
+       .show  = bt_seq_show,
+};
+
+static int bt_seq_open(struct inode *inode, struct file *file)
+{
+       struct bt_sock_list *sk_list;
+       struct bt_seq_state *s;
+
+       sk_list = PDE(inode)->data;
+       s = __seq_open_private(file, &bt_seq_ops,
+                              sizeof(struct bt_seq_state));
+       if (!s)
+               return -ENOMEM;
+
+       s->l = sk_list;
+       return 0;
+}
+
+int bt_procfs_init(struct module* module, struct net *net, const char *name,
+                  struct bt_sock_list* sk_list,
+                  int (* seq_show)(struct seq_file *, void *))
+{
+       struct proc_dir_entry * pde;
+
+       sk_list->custom_seq_show = seq_show;
+
+       sk_list->fops.owner     = module;
+       sk_list->fops.open      = bt_seq_open;
+       sk_list->fops.read      = seq_read;
+       sk_list->fops.llseek    = seq_lseek;
+       sk_list->fops.release   = seq_release_private;
+
+       pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
+       if (!pde)
+               return -ENOMEM;
+
+       pde->data = sk_list;
+
+       return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+       proc_net_remove(net, name);
+}
+#else
+int bt_procfs_init(struct module* module, struct net *net, const char *name,
+                  struct bt_sock_list* sk_list,
+                  int (* seq_show)(struct seq_file *, void *))
+{
+       return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+}
+#endif
+EXPORT_SYMBOL(bt_procfs_init);
+EXPORT_SYMBOL(bt_procfs_cleanup);
+
 static struct net_proto_family bt_sock_family_ops = {
        .owner  = THIS_MODULE,
        .family = PF_BLUETOOTH,
index 1eaacf10d19d9ea078f49f225423dff96ebee8a2..e7154a58465f6b9136f527ab868024464d961564 100644 (file)
 
 #include "bnep.h"
 
+static struct bt_sock_list bnep_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
+};
+
 static int bnep_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -38,6 +42,8 @@ static int bnep_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&bnep_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
        return 0;
@@ -204,6 +210,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&bnep_sk_list, sk);
        return 0;
 }
 
@@ -222,19 +229,30 @@ int __init bnep_sock_init(void)
                return err;
 
        err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register BNEP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create BNEP proc file");
+               bt_sock_unregister(BTPROTO_BNEP);
+               goto error;
+       }
+
+       BT_INFO("BNEP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("Can't register BNEP socket");
        proto_unregister(&bnep_proto);
        return err;
 }
 
 void __exit bnep_sock_cleanup(void)
 {
+       bt_procfs_cleanup(&init_net, "bnep");
        if (bt_sock_unregister(BTPROTO_BNEP) < 0)
                BT_ERR("Can't unregister BNEP socket");
 
index 32dc83dcb6b2edd669d7a9ce2fc00480d249b4be..aacb802d1ee45d419aac1555442bb08edf227dc9 100644 (file)
 
 #include "cmtp.h"
 
+static struct bt_sock_list cmtp_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
+};
+
 static int cmtp_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -51,6 +55,8 @@ static int cmtp_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&cmtp_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -214,6 +220,8 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&cmtp_sk_list, sk);
+
        return 0;
 }
 
@@ -232,19 +240,30 @@ int cmtp_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register CMTP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create CMTP proc file");
+               bt_sock_unregister(BTPROTO_CMTP);
+               goto error;
+       }
+
+       BT_INFO("CMTP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("Can't register CMTP socket");
        proto_unregister(&cmtp_proto);
        return err;
 }
 
 void cmtp_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "cmtp");
        if (bt_sock_unregister(BTPROTO_CMTP) < 0)
                BT_ERR("Can't unregister CMTP socket");
 
index 3c094e78dde98cafed3ac893abd3b2fa86b76a92..b9196a44f7598bf33b0c2bff6d0764eeeba8fc11 100644 (file)
@@ -31,7 +31,7 @@
 #include <net/bluetooth/a2mp.h>
 #include <net/bluetooth/smp.h>
 
-static void hci_le_connect(struct hci_conn *conn)
+static void hci_le_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;
@@ -55,12 +55,12 @@ static void hci_le_connect(struct hci_conn *conn)
        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 }
 
-static void hci_le_connect_cancel(struct hci_conn *conn)
+static void hci_le_create_connection_cancel(struct hci_conn *conn)
 {
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
 }
 
-void hci_acl_connect(struct hci_conn *conn)
+static void hci_acl_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
@@ -104,7 +104,7 @@ void hci_acl_connect(struct hci_conn *conn)
        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
 }
 
-static void hci_acl_connect_cancel(struct hci_conn *conn)
+static void hci_acl_create_connection_cancel(struct hci_conn *conn)
 {
        struct hci_cp_create_conn_cancel cp;
 
@@ -130,7 +130,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
 }
 
-void hci_add_sco(struct hci_conn *conn, __u16 handle)
+static void hci_add_sco(struct hci_conn *conn, __u16 handle)
 {
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;
@@ -246,9 +246,9 @@ static void hci_conn_timeout(struct work_struct *work)
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
-                               hci_acl_connect_cancel(conn);
+                               hci_acl_create_connection_cancel(conn);
                        else if (conn->type == LE_LINK)
-                               hci_le_connect_cancel(conn);
+                               hci_le_create_connection_cancel(conn);
                }
                break;
        case BT_CONFIG:
@@ -471,40 +471,37 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 }
 EXPORT_SYMBOL(hci_get_route);
 
-/* Create SCO, ACL or LE connection.
- * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
-                            __u8 dst_type, __u8 sec_level, __u8 auth_type)
+static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+                                   u8 dst_type, u8 sec_level, u8 auth_type)
 {
-       struct hci_conn *acl;
-       struct hci_conn *sco;
        struct hci_conn *le;
 
-       BT_DBG("%s dst %s", hdev->name, batostr(dst));
+       le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       if (!le) {
+               le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (le)
+                       return ERR_PTR(-EBUSY);
 
-       if (type == LE_LINK) {
-               le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
-               if (!le) {
-                       le = hci_conn_hash_lookup_state(hdev, LE_LINK,
-                                                       BT_CONNECT);
-                       if (le)
-                               return ERR_PTR(-EBUSY);
+               le = hci_conn_add(hdev, LE_LINK, dst);
+               if (!le)
+                       return ERR_PTR(-ENOMEM);
 
-                       le = hci_conn_add(hdev, LE_LINK, dst);
-                       if (!le)
-                               return ERR_PTR(-ENOMEM);
+               le->dst_type = bdaddr_to_le(dst_type);
+               hci_le_create_connection(le);
+       }
 
-                       le->dst_type = bdaddr_to_le(dst_type);
-                       hci_le_connect(le);
-               }
+       le->pending_sec_level = sec_level;
+       le->auth_type = auth_type;
 
-               le->pending_sec_level = sec_level;
-               le->auth_type = auth_type;
+       hci_conn_hold(le);
 
-               hci_conn_hold(le);
+       return le;
+}
 
-               return le;
-       }
+static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+                                               u8 sec_level, u8 auth_type)
+{
+       struct hci_conn *acl;
 
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
@@ -519,10 +516,20 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
-               hci_acl_connect(acl);
+               hci_acl_create_connection(acl);
        }
 
-       if (type == ACL_LINK)
+       return acl;
+}
+
+static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
+                               bdaddr_t *dst, u8 sec_level, u8 auth_type)
+{
+       struct hci_conn *acl;
+       struct hci_conn *sco;
+
+       acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
+       if (IS_ERR(acl))
                return acl;
 
        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
@@ -556,6 +563,25 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
        return sco;
 }
 
+/* Create SCO, ACL or LE connection. */
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+                            __u8 dst_type, __u8 sec_level, __u8 auth_type)
+{
+       BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type);
+
+       switch (type) {
+       case LE_LINK:
+               return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
+       case ACL_LINK:
+               return hci_connect_acl(hdev, dst, sec_level, auth_type);
+       case SCO_LINK:
+       case ESCO_LINK:
+               return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
 /* Check link security requirement */
 int hci_conn_check_link_mode(struct hci_conn *conn)
 {
@@ -775,7 +801,7 @@ void hci_conn_check_pending(struct hci_dev *hdev)
 
        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
-               hci_acl_connect(conn);
+               hci_acl_create_connection(conn);
 
        hci_dev_unlock(hdev);
 }
@@ -913,7 +939,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
        return chan;
 }
 
-int hci_chan_del(struct hci_chan *chan)
+void hci_chan_del(struct hci_chan *chan)
 {
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
@@ -926,8 +952,6 @@ int hci_chan_del(struct hci_chan *chan)
 
        skb_queue_purge(&chan->data_q);
        kfree(chan);
-
-       return 0;
 }
 
 void hci_chan_list_flush(struct hci_conn *conn)
index 0b997c8f965531d22da050339d17011ab9dd5b3c..8a0ce706aebd624ae7fd1c50b9670780dc4f6761 100644 (file)
@@ -231,6 +231,9 @@ static void amp_init(struct hci_dev *hdev)
 
        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+
+       /* Read Data Blk size */
+       hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
 }
 
 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -268,7 +271,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
-
 }
 
 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -696,7 +698,8 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
-               if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+                   mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
@@ -799,7 +802,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
-       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+           mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
@@ -1652,6 +1656,7 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
+       INIT_LIST_HEAD(&hdev->conn_hash.list);
 
        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
@@ -1674,7 +1679,6 @@ struct hci_dev *hci_alloc_dev(void)
 
        hci_init_sysfs(hdev);
        discovery_init(hdev);
-       hci_conn_hash_init(hdev);
 
        return hdev;
 }
index 715d7e33fba0639d1556088e5ea84a08f6b4ccd4..2022b43c7353ee98d7546d6c9e0ef67c43811d3f 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
 
 /* Handle HCI Event packets */
 
@@ -303,7 +304,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (status != 0) {
+       if (status) {
                mgmt_write_scan_failed(hdev, param, status);
                hdev->discov_timeout = 0;
                goto done;
@@ -513,7 +514,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
        if (hdev->features[3] & LMP_RSSI_INQ)
                events[4] |= 0x02; /* Inquiry Result with RSSI */
 
-       if (hdev->features[5] & LMP_SNIFF_SUBR)
+       if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */
 
        if (hdev->features[5] & LMP_PAUSE_ENC)
@@ -522,13 +523,13 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
        if (hdev->features[6] & LMP_EXT_INQ)
                events[5] |= 0x40; /* Extended Inquiry Result */
 
-       if (hdev->features[6] & LMP_NO_FLUSH)
+       if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */
 
        if (hdev->features[7] & LMP_LSTO)
                events[6] |= 0x80; /* Link Supervision Timeout Changed */
 
-       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+       if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
@@ -541,7 +542,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
                                         * Features Notification */
        }
 
-       if (hdev->features[4] & LMP_LE)
+       if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */
 
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
@@ -623,11 +624,11 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;
 
-       if (hdev->features[0] & LMP_RSWITCH)
+       if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (hdev->features[0] & LMP_HOLD)
                link_policy |= HCI_LP_HOLD;
-       if (hdev->features[0] & LMP_SNIFF)
+       if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (hdev->features[1] & LMP_PARK)
                link_policy |= HCI_LP_PARK;
@@ -686,7 +687,7 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
                hdev->esco_type |= (ESCO_HV3);
        }
 
-       if (hdev->features[3] & LMP_ESCO)
+       if (lmp_esco_capable(hdev))
                hdev->esco_type |= (ESCO_EV3);
 
        if (hdev->features[4] & LMP_EV4)
@@ -746,7 +747,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
                break;
        }
 
-       if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
+       if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
                hci_set_le_support(hdev);
 
 done:
@@ -925,7 +926,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 
-       if (rp->status != 0)
+       if (rp->status)
                goto unlock;
 
        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
@@ -1625,43 +1626,30 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
 
 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 {
-       struct hci_cp_le_create_conn *cp;
        struct hci_conn *conn;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
-       if (!cp)
-               return;
+       if (status) {
+               hci_dev_lock(hdev);
 
-       hci_dev_lock(hdev);
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (!conn) {
+                       hci_dev_unlock(hdev);
+                       return;
+               }
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+               BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
+                      conn);
 
-       BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
-              conn);
+               conn->state = BT_CLOSED;
+               mgmt_connect_failed(hdev, &conn->dst, conn->type,
+                                   conn->dst_type, status);
+               hci_proto_connect_cfm(conn, status);
+               hci_conn_del(conn);
 
-       if (status) {
-               if (conn && conn->state == BT_CONNECT) {
-                       conn->state = BT_CLOSED;
-                       mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
-                                           conn->dst_type, status);
-                       hci_proto_connect_cfm(conn, status);
-                       hci_conn_del(conn);
-               }
-       } else {
-               if (!conn) {
-                       conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-                       if (conn) {
-                               conn->dst_type = cp->peer_addr_type;
-                               conn->out = true;
-                       } else {
-                               BT_ERR("No memory for new connection");
-                       }
-               }
+               hci_dev_unlock(hdev);
        }
-
-       hci_dev_unlock(hdev);
 }
 
 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
@@ -1904,6 +1892,22 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
+static u8 hci_to_mgmt_reason(u8 err)
+{
+       switch (err) {
+       case HCI_ERROR_CONNECTION_TIMEOUT:
+               return MGMT_DEV_DISCONN_TIMEOUT;
+       case HCI_ERROR_REMOTE_USER_TERM:
+       case HCI_ERROR_REMOTE_LOW_RESOURCES:
+       case HCI_ERROR_REMOTE_POWER_OFF:
+               return MGMT_DEV_DISCONN_REMOTE;
+       case HCI_ERROR_LOCAL_HOST_TERM:
+               return MGMT_DEV_DISCONN_LOCAL_HOST;
+       default:
+               return MGMT_DEV_DISCONN_UNKNOWN;
+       }
+}
+
 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_disconn_complete *ev = (void *) skb->data;
@@ -1922,12 +1926,15 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
            (conn->type == ACL_LINK || conn->type == LE_LINK)) {
-               if (ev->status != 0)
+               if (ev->status) {
                        mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
                                               conn->dst_type, ev->status);
-               else
+               } else {
+                       u8 reason = hci_to_mgmt_reason(ev->reason);
+
                        mgmt_device_disconnected(hdev, &conn->dst, conn->type,
-                                                conn->dst_type);
+                                                conn->dst_type, reason);
+               }
        }
 
        if (ev->status == 0) {
@@ -3268,12 +3275,67 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
 
        BT_DBG("%s", hdev->name);
 
-       hci_dev_lock(hdev);
-
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
+}
 
-       hci_dev_unlock(hdev);
+static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
+                                       struct sk_buff *skb)
+{
+       struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+       if (!conn)
+               return;
+
+       conn->passkey_notify = __le32_to_cpu(ev->passkey);
+       conn->passkey_entered = 0;
+
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+                                        conn->dst_type, conn->passkey_notify,
+                                        conn->passkey_entered);
+}
+
+static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_keypress_notify *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+       if (!conn)
+               return;
+
+       switch (ev->type) {
+       case HCI_KEYPRESS_STARTED:
+               conn->passkey_entered = 0;
+               return;
+
+       case HCI_KEYPRESS_ENTERED:
+               conn->passkey_entered++;
+               break;
+
+       case HCI_KEYPRESS_ERASED:
+               conn->passkey_entered--;
+               break;
+
+       case HCI_KEYPRESS_CLEARED:
+               conn->passkey_entered = 0;
+               break;
+
+       case HCI_KEYPRESS_COMPLETED:
+               return;
+       }
+
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+                                        conn->dst_type, conn->passkey_notify,
+                                        conn->passkey_entered);
 }
 
 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
@@ -3295,7 +3357,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
         * initiated the authentication. A traditional auth_complete
         * event gets always produced as initiator and is also mapped to
         * the mgmt_auth_failed event */
-       if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
+       if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
                mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
                                 ev->status);
 
@@ -3366,11 +3428,23 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (ev->status) {
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (!conn)
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (!conn) {
+               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+               if (!conn) {
+                       BT_ERR("No memory for new connection");
                        goto unlock;
+               }
+
+               conn->dst_type = ev->bdaddr_type;
 
+               if (ev->role == LE_CONN_ROLE_MASTER) {
+                       conn->out = true;
+                       conn->link_mode |= HCI_LM_MASTER;
+               }
+       }
+
+       if (ev->status) {
                mgmt_connect_failed(hdev, &conn->dst, conn->type,
                                    conn->dst_type, ev->status);
                hci_proto_connect_cfm(conn, ev->status);
@@ -3379,18 +3453,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                goto unlock;
        }
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
-       if (!conn) {
-               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
-               if (!conn) {
-                       BT_ERR("No memory for new connection");
-                       hci_dev_unlock(hdev);
-                       return;
-               }
-
-               conn->dst_type = ev->bdaddr_type;
-       }
-
        if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
                                      conn->dst_type, 0, NULL, 0, NULL);
@@ -3640,6 +3702,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_user_passkey_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_USER_PASSKEY_NOTIFY:
+               hci_user_passkey_notify_evt(hdev, skb);
+               break;
+
+       case HCI_EV_KEYPRESS_NOTIFY:
+               hci_keypress_notify_evt(hdev, skb);
+               break;
+
        case HCI_EV_SIMPLE_PAIR_COMPLETE:
                hci_simple_pair_complete_evt(hdev, skb);
                break;
index d5ace1eda3ed8c3fd06f422bb7cbf340d3187c6c..07f073935811b86292136a59ecb544d323d3ea86 100644 (file)
@@ -1102,21 +1102,30 @@ int __init hci_sock_init(void)
                return err;
 
        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("HCI socket registration failed");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create HCI proc file");
+               bt_sock_unregister(BTPROTO_HCI);
+               goto error;
+       }
 
        BT_INFO("HCI socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("HCI socket registration failed");
        proto_unregister(&hci_sk_proto);
        return err;
 }
 
 void hci_sock_cleanup(void)
 {
+       bt_procfs_cleanup(&init_net, "hci");
        if (bt_sock_unregister(BTPROTO_HCI) < 0)
                BT_ERR("HCI socket unregistration failed");
 
index b24fb3bd862555c81746fe664e7f90270cc6a2df..82a829d90b0f4a8f60013653bf0eed34237cb5b0 100644 (file)
 
 #include "hidp.h"
 
+static struct bt_sock_list hidp_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
+};
+
 static int hidp_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -34,6 +38,8 @@ static int hidp_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&hidp_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -253,6 +259,8 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&hidp_sk_list, sk);
+
        return 0;
 }
 
@@ -271,8 +279,19 @@ int __init hidp_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register HIDP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create HIDP proc file");
+               bt_sock_unregister(BTPROTO_HIDP);
+               goto error;
+       }
+
+       BT_INFO("HIDP socket layer initialized");
 
        return 0;
 
@@ -284,6 +303,7 @@ error:
 
 void __exit hidp_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "hidp");
        if (bt_sock_unregister(BTPROTO_HIDP) < 0)
                BT_ERR("Can't unregister HIDP socket");
 
index 38c00f142203505d3a3c809e8e6162f159ae0196..a91239dcda417f5a862346c981bb941aba44d086 100644 (file)
@@ -406,7 +406,7 @@ struct l2cap_chan *l2cap_chan_create(void)
 
        chan->state = BT_OPEN;
 
-       atomic_set(&chan->refcnt, 1);
+       kref_init(&chan->kref);
 
        /* This flag is cleared in l2cap_chan_ready() */
        set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
@@ -416,13 +416,31 @@ struct l2cap_chan *l2cap_chan_create(void)
        return chan;
 }
 
-void l2cap_chan_destroy(struct l2cap_chan *chan)
+static void l2cap_chan_destroy(struct kref *kref)
 {
+       struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
+
+       BT_DBG("chan %p", chan);
+
        write_lock(&chan_list_lock);
        list_del(&chan->global_l);
        write_unlock(&chan_list_lock);
 
-       l2cap_chan_put(chan);
+       kfree(chan);
+}
+
+void l2cap_chan_hold(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+       kref_get(&c->kref);
+}
+
+void l2cap_chan_put(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+       kref_put(&c->kref, l2cap_chan_destroy);
 }
 
 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
@@ -1431,7 +1449,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        int err;
 
        BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
-              dst_type, __le16_to_cpu(chan->psm));
+              dst_type, __le16_to_cpu(psm));
 
        hdev = hci_get_route(dst, src);
        if (!hdev)
@@ -5331,7 +5349,7 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return exact ? lm1 : lm2;
 }
 
-int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 {
        struct l2cap_conn *conn;
 
@@ -5344,7 +5362,6 @@ int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
        } else
                l2cap_conn_del(hcon, bt_to_errno(status));
 
-       return 0;
 }
 
 int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5358,12 +5375,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
        return conn->disc_reason;
 }
 
-int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        l2cap_conn_del(hcon, bt_to_errno(reason));
-       return 0;
 }
 
 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
@@ -5406,6 +5422,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
                       state_to_string(chan->state));
 
+               if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+                       l2cap_chan_unlock(chan);
+                       continue;
+               }
+
                if (chan->scid == L2CAP_CID_LE_DATA) {
                        if (!status && encrypt) {
                                chan->sec_level = hcon->sec_level;
index 34bbe1c5e389500f080e15b30c194e95ea36f189..083f2bf065d4d788e59702d29b71b39aaa7bd688 100644 (file)
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
+static struct bt_sock_list l2cap_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
@@ -824,7 +828,7 @@ static void l2cap_sock_kill(struct sock *sk)
 
        /* Kill poor orphan */
 
-       l2cap_chan_destroy(l2cap_pi(sk)->chan);
+       l2cap_chan_put(l2cap_pi(sk)->chan);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
 }
@@ -887,6 +891,8 @@ static int l2cap_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&l2cap_sk_list, sk);
+
        err = l2cap_sock_shutdown(sock, 2);
 
        sock_orphan(sk);
@@ -1211,6 +1217,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
                return -ENOMEM;
 
        l2cap_sock_init(sk, NULL);
+       bt_sock_link(&l2cap_sk_list, sk);
        return 0;
 }
 
@@ -1249,21 +1256,30 @@ int __init l2cap_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("L2CAP socket registration failed");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create L2CAP proc file");
+               bt_sock_unregister(BTPROTO_L2CAP);
+               goto error;
+       }
 
        BT_INFO("L2CAP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("L2CAP socket registration failed");
        proto_unregister(&l2cap_proto);
        return err;
 }
 
 void l2cap_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "l2cap");
        if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
                BT_ERR("L2CAP socket unregistration failed");
 
index eba022de3c205bb55f2ed9639e4faf21d78ae9a8..aa2ea0a8142cc0d6c7378ced06be1257b2f846d7 100644 (file)
@@ -35,7 +35,7 @@
 bool enable_hs;
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  1
+#define MGMT_REVISION  2
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -99,6 +99,7 @@ static const u16 mgmt_events[] = {
        MGMT_EV_DEVICE_BLOCKED,
        MGMT_EV_DEVICE_UNBLOCKED,
        MGMT_EV_DEVICE_UNPAIRED,
+       MGMT_EV_PASSKEY_NOTIFY,
 };
 
 /*
@@ -193,6 +194,11 @@ static u8 mgmt_status_table[] = {
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
 };
 
+bool mgmt_valid_hdev(struct hci_dev *hdev)
+{
+       return hdev->dev_type == HCI_BREDR;
+}
+
 static u8 mgmt_status(u8 hci_status)
 {
        if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -317,7 +323,6 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 data_len)
 {
        struct mgmt_rp_read_index_list *rp;
-       struct list_head *p;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
@@ -328,7 +333,10 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
        read_lock(&hci_dev_list_lock);
 
        count = 0;
-       list_for_each(p, &hci_dev_list) {
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (!mgmt_valid_hdev(d))
+                       continue;
+
                count++;
        }
 
@@ -346,6 +354,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                if (test_bit(HCI_SETUP, &d->dev_flags))
                        continue;
 
+               if (!mgmt_valid_hdev(d))
+                       continue;
+
                rp->index[i++] = cpu_to_le16(d->id);
                BT_DBG("Added hci%u", d->id);
        }
@@ -370,10 +381,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        settings |= MGMT_SETTING_DISCOVERABLE;
        settings |= MGMT_SETTING_PAIRABLE;
 
-       if (hdev->features[6] & LMP_SIMPLE_PAIR)
+       if (lmp_ssp_capable(hdev))
                settings |= MGMT_SETTING_SSP;
 
-       if (!(hdev->features[4] & LMP_NO_BREDR)) {
+       if (lmp_bredr_capable(hdev)) {
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;
        }
@@ -381,7 +392,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        if (enable_hs)
                settings |= MGMT_SETTING_HS;
 
-       if (hdev->features[4] & LMP_LE)
+       if (lmp_le_capable(hdev))
                settings |= MGMT_SETTING_LE;
 
        return settings;
@@ -403,7 +414,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
                settings |= MGMT_SETTING_PAIRABLE;
 
-       if (!(hdev->features[4] & LMP_NO_BREDR))
+       if (lmp_bredr_capable(hdev))
                settings |= MGMT_SETTING_BREDR;
 
        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -1111,7 +1122,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+       if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto failed;
@@ -1195,7 +1206,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       if (!(hdev->features[4] & LMP_LE)) {
+       if (!lmp_le_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
@@ -2191,7 +2202,7 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+       if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
@@ -2820,6 +2831,9 @@ static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
 
 int mgmt_index_added(struct hci_dev *hdev)
 {
+       if (!mgmt_valid_hdev(hdev))
+               return -ENOTSUPP;
+
        return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
@@ -2827,6 +2841,9 @@ int mgmt_index_removed(struct hci_dev *hdev)
 {
        u8 status = MGMT_STATUS_INVALID_INDEX;
 
+       if (!mgmt_valid_hdev(hdev))
+               return -ENOTSUPP;
+
        mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
        return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
@@ -3077,16 +3094,17 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
 }
 
 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                            u8 link_type, u8 addr_type)
+                            u8 link_type, u8 addr_type, u8 reason)
 {
-       struct mgmt_addr_info ev;
+       struct mgmt_ev_device_disconnected ev;
        struct sock *sk = NULL;
        int err;
 
        mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
-       bacpy(&ev.bdaddr, bdaddr);
-       ev.type = link_to_bdaddr(link_type, addr_type);
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_bdaddr(link_type, addr_type);
+       ev.reason = reason;
 
        err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
                         sk);
@@ -3275,6 +3293,22 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                          MGMT_OP_USER_PASSKEY_NEG_REPLY);
 }
 
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                            u8 link_type, u8 addr_type, u32 passkey,
+                            u8 entered)
+{
+       struct mgmt_ev_passkey_notify ev;
+
+       BT_DBG("%s", hdev->name);
+
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_bdaddr(link_type, addr_type);
+       ev.passkey = __cpu_to_le32(passkey);
+       ev.entered = entered;
+
+       return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
+}
+
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                     u8 addr_type, u8 status)
 {
index 1a17850d093cd652621ac54833c53b8dabd395bc..b3226f3658cfda1142c7484b110324115dccc85b 100644 (file)
@@ -1035,8 +1035,17 @@ int __init rfcomm_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("RFCOMM socket layer registration failed");
+               goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create RFCOMM proc file");
+               bt_sock_unregister(BTPROTO_RFCOMM);
                goto error;
+       }
 
        if (bt_debugfs) {
                rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
@@ -1050,13 +1059,14 @@ int __init rfcomm_init_sockets(void)
        return 0;
 
 error:
-       BT_ERR("RFCOMM socket layer registration failed");
        proto_unregister(&rfcomm_proto);
        return err;
 }
 
 void __exit rfcomm_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "rfcomm");
+
        debugfs_remove(rfcomm_sock_debugfs);
 
        if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
index 3589e21edb09817bace527336fd4880e24137ada..dc42b917aaafad3f050177694c438648af2b22bb 100644 (file)
@@ -912,7 +912,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return lm;
 }
 
-int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
        BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
        if (!status) {
@@ -923,16 +923,13 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
                        sco_conn_ready(conn);
        } else
                sco_conn_del(hcon, bt_to_errno(status));
-
-       return 0;
 }
 
-int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        sco_conn_del(hcon, bt_to_errno(reason));
-       return 0;
 }
 
 int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
@@ -1025,6 +1022,13 @@ int __init sco_init(void)
                goto error;
        }
 
+       err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create SCO proc file");
+               bt_sock_unregister(BTPROTO_SCO);
+               goto error;
+       }
+
        if (bt_debugfs) {
                sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
                                                  NULL, &sco_debugfs_fops);
@@ -1043,6 +1047,8 @@ error:
 
 void __exit sco_exit(void)
 {
+       bt_procfs_cleanup(&init_net, "sco");
+
        debugfs_remove(sco_debugfs);
 
        if (bt_sock_unregister(BTPROTO_SCO) < 0)
index d21f32383517f6b66a3b0c43ea534beb3840307c..d9576e6de2b85c232c1dcde0e54469bac3397212 100644 (file)
@@ -312,7 +312,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 
                        fe->is_local = f->is_local;
                        if (!f->is_static)
-                               fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
+                               fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
                        ++fe;
                        ++num;
                }
@@ -467,14 +467,14 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
 
 static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
                         const struct net_bridge_fdb_entry *fdb,
-                        u32 pid, u32 seq, int type, unsigned int flags)
+                        u32 portid, u32 seq, int type, unsigned int flags)
 {
        unsigned long now = jiffies;
        struct nda_cacheinfo ci;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -555,7 +555,7 @@ int br_fdb_dump(struct sk_buff *skb,
                                goto skip;
 
                        if (fdb_fill_info(skb, br, f,
-                                         NETLINK_CB(cb->skb).pid,
+                                         NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq,
                                          RTM_NEWNEIGH,
                                          NLM_F_MULTI) < 0)
@@ -608,8 +608,9 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 }
 
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
-int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
-              unsigned char *addr, u16 nlh_flags)
+int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+              struct net_device *dev,
+              const unsigned char *addr, u16 nlh_flags)
 {
        struct net_bridge_port *p;
        int err = 0;
@@ -639,7 +640,7 @@ int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
        return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
+static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
 {
        struct net_bridge *br = p->br;
        struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -655,7 +656,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
 
 /* Remove neighbor entry with RTM_DELNEIGH */
 int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
-                 unsigned char *addr)
+                 const unsigned char *addr)
 {
        struct net_bridge_port *p;
        int err;
index fe41260fbf38b28bb121dcc2235a5d27b83e27b7..093f527276a39c097de23cea8ab7e4016df438cc 100644 (file)
@@ -127,7 +127,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        goto skip;
 
                if (br_fill_ifinfo(skb, port,
-                                  NETLINK_CB(cb->skb).pid,
+                                  NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, RTM_NEWLINK,
                                   NLM_F_MULTI) < 0)
                        break;
index f507d2af9646bcb273cf1f50f98feb065027e14a..9b278c4ebee10efb45ee07f480f749e609ca891e 100644 (file)
@@ -363,10 +363,10 @@ extern void br_fdb_update(struct net_bridge *br,
 
 extern int br_fdb_delete(struct ndmsg *ndm,
                         struct net_device *dev,
-                        unsigned char *addr);
-extern int br_fdb_add(struct ndmsg *nlh,
+                        const unsigned char *addr);
+extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
                      struct net_device *dev,
-                     unsigned char *addr,
+                     const unsigned char *addr,
                      u16 nlh_flags);
 extern int br_fdb_dump(struct sk_buff *skb,
                       struct netlink_callback *cb,
index a6747e673426e3f29c734f11596c1be328e7b558..c3530a81a33bf40c9162cb28a6f931ea937baf54 100644 (file)
@@ -170,5 +170,5 @@ void br_stp_port_timer_init(struct net_bridge_port *p)
 unsigned long br_timer_value(const struct timer_list *timer)
 {
        return timer_pending(timer)
-               ? jiffies_to_clock_t(timer->expires - jiffies) : 0;
+               ? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
 }
index 19063473c71f2efc897e2ee3a51ea023052f15e6..3476ec469740d6829deed8089888353d7934d4a5 100644 (file)
@@ -298,8 +298,7 @@ static int __init ebt_ulog_init(void)
                spin_lock_init(&ulog_buffers[i].lock);
        }
 
-       ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
-                                         THIS_MODULE, &cfg);
+       ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
        if (!ebtulognl)
                ret = -ENOMEM;
        else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
index 42e6bd0945745f99ae2cb718b55efed2410f704b..3c2e9dced9e0afd8a5ed3522357eb0faf63e8289 100644 (file)
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
 static int __net_init frame_filter_net_init(struct net *net)
 {
        net->xt.frame_filter = ebt_register_table(net, &frame_filter);
-       if (IS_ERR(net->xt.frame_filter))
-               return PTR_ERR(net->xt.frame_filter);
-       return 0;
+       return PTR_RET(net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
index 6dc2f878ae0533a58455f3b2b76f49681fb42f5f..10871bc77908ec8799a482636995771fd47a6b03 100644 (file)
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
 static int __net_init frame_nat_net_init(struct net *net)
 {
        net->xt.frame_nat = ebt_register_table(net, &frame_nat);
-       if (IS_ERR(net->xt.frame_nat))
-               return PTR_ERR(net->xt.frame_nat);
-       return 0;
+       return PTR_RET(net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
index b54d5e695b034b8abbd3c0dc083fc74c5d1cf15e..127879c55fb66f9d3e6698def42013c1561ab58b 100644 (file)
@@ -549,7 +549,7 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
                if (idx < s_idx)
                        goto cont;
 
-               if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid,
+               if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid,
                    cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
                        break;
 cont:
index 17e912f9b71110fe18df0e8e236536c82de98e5c..1e0a1847c3bbee7fd5aeb8654bf0d9da08ce5ca9 100644 (file)
@@ -959,18 +959,30 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
-static int dev_get_valid_name(struct net_device *dev, const char *name)
+static int dev_alloc_name_ns(struct net *net,
+                            struct net_device *dev,
+                            const char *name)
 {
-       struct net *net;
+       char buf[IFNAMSIZ];
+       int ret;
 
-       BUG_ON(!dev_net(dev));
-       net = dev_net(dev);
+       ret = __dev_alloc_name(net, name, buf);
+       if (ret >= 0)
+               strlcpy(dev->name, buf, IFNAMSIZ);
+       return ret;
+}
+
+static int dev_get_valid_name(struct net *net,
+                             struct net_device *dev,
+                             const char *name)
+{
+       BUG_ON(!net);
 
        if (!dev_valid_name(name))
                return -EINVAL;
 
        if (strchr(name, '%'))
-               return dev_alloc_name(dev, name);
+               return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
@@ -1006,7 +1018,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
        memcpy(oldname, dev->name, IFNAMSIZ);
 
-       err = dev_get_valid_name(dev, newname);
+       err = dev_get_valid_name(net, dev, newname);
        if (err < 0)
                return err;
 
@@ -1109,11 +1121,23 @@ void netdev_state_change(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_state_change);
 
-int netdev_bonding_change(struct net_device *dev, unsigned long event)
+/**
+ *     netdev_notify_peers - notify network peers about existence of @dev
+ *     @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netdev_notify_peers(struct net_device *dev)
 {
-       return call_netdevice_notifiers(event, dev);
+       rtnl_lock();
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+       rtnl_unlock();
 }
-EXPORT_SYMBOL(netdev_bonding_change);
+EXPORT_SYMBOL(netdev_notify_peers);
 
 /**
  *     dev_load        - load a network module
@@ -1394,7 +1418,6 @@ rollback:
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
                }
        }
 
@@ -1436,7 +1459,6 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
                }
        }
 unlock:
@@ -2175,9 +2197,7 @@ EXPORT_SYMBOL(netif_skb_features);
 /*
  * Returns true if either:
  *     1. skb has frag_list and the device doesn't support FRAGLIST, or
- *     2. skb is fragmented and the device does not support SG, or if
- *        at least one of fragments is in highmem and device does not
- *        support DMA from it.
+ *     2. skb is fragmented and the device does not support SG.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
                                      int features)
@@ -2206,9 +2226,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(skb);
 
-               if (!list_empty(&ptype_all))
-                       dev_queue_xmit_nit(skb, dev);
-
                features = netif_skb_features(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -2243,6 +2260,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        }
                }
 
+               if (!list_empty(&ptype_all))
+                       dev_queue_xmit_nit(skb, dev);
+
                skb_len = skb->len;
                rc = ops->ndo_start_xmit(skb, dev);
                trace_net_dev_xmit(skb, rc, dev, skb_len);
@@ -2265,6 +2285,9 @@ gso:
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(nskb);
 
+               if (!list_empty(&ptype_all))
+                       dev_queue_xmit_nit(nskb, dev);
+
                skb_len = nskb->len;
                rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
@@ -2374,8 +2397,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
-                                       struct sk_buff *skb)
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                   struct sk_buff *skb)
 {
        int queue_index;
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -2549,7 +2572,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
        skb_update_prio(skb);
 
-       txq = dev_pick_tx(dev, skb);
+       txq = netdev_pick_tx(dev, skb);
        q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2622,6 +2645,8 @@ EXPORT_SYMBOL(dev_queue_xmit);
   =======================================================================*/
 
 int netdev_max_backlog __read_mostly = 1000;
+EXPORT_SYMBOL(netdev_max_backlog);
+
 int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;            /* old backlog weight */
@@ -5239,12 +5264,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
  */
 static int dev_new_index(struct net *net)
 {
-       static int ifindex;
+       int ifindex = net->ifindex;
        for (;;) {
                if (++ifindex <= 0)
                        ifindex = 1;
                if (!__dev_get_by_index(net, ifindex))
-                       return ifindex;
+                       return net->ifindex = ifindex;
        }
 }
 
@@ -5322,10 +5347,6 @@ static void rollback_registered_many(struct list_head *head)
                netdev_unregister_kobject(dev);
        }
 
-       /* Process any work delayed until the end of the batch */
-       dev = list_first_entry(head, struct net_device, unreg_list);
-       call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
-
        synchronize_net();
 
        list_for_each_entry(dev, head, unreg_list)
@@ -5583,7 +5604,7 @@ int register_netdevice(struct net_device *dev)
 
        dev->iflink = -1;
 
-       ret = dev_get_valid_name(dev, dev->name);
+       ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;
 
@@ -5597,7 +5618,12 @@ int register_netdevice(struct net_device *dev)
                }
        }
 
-       dev->ifindex = dev_new_index(net);
+       ret = -EBUSY;
+       if (!dev->ifindex)
+               dev->ifindex = dev_new_index(net);
+       else if (__dev_get_by_index(net, dev->ifindex))
+               goto err_uninit;
+
        if (dev->iflink == -1)
                dev->iflink = dev->ifindex;
 
@@ -5640,6 +5666,8 @@ int register_netdevice(struct net_device *dev)
 
        set_bit(__LINK_STATE_PRESENT, &dev->state);
 
+       linkwatch_init_dev(dev);
+
        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
@@ -5773,9 +5801,12 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
                        /* Rebroadcast unregister notification */
                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-                       /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
-                        * should have already handle it the first time */
 
+                       __rtnl_unlock();
+                       rcu_barrier();
+                       rtnl_lock();
+
+                       call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
                                     &dev->state)) {
                                /* We must not have linkwatch events
@@ -5837,9 +5868,8 @@ void netdev_run_todo(void)
 
        __rtnl_unlock();
 
-       /* Wait for rcu callbacks to finish before attempting to drain
-        * the device list.  This usually avoids a 250ms wait.
-        */
+
+       /* Wait for rcu callbacks to finish before next phase */
        if (!list_empty(&list))
                rcu_barrier();
 
@@ -5848,6 +5878,10 @@ void netdev_run_todo(void)
                        = list_first_entry(&list, struct net_device, todo_list);
                list_del(&dev->todo_list);
 
+               rtnl_lock();
+               call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
+               __rtnl_unlock();
+
                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
                        pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
@@ -5943,6 +5977,8 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
        return queue;
 }
 
+static const struct ethtool_ops default_ethtool_ops;
+
 /**
  *     alloc_netdev_mqs - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
@@ -6030,6 +6066,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        strcpy(dev->name, name);
        dev->group = INIT_NETDEV_GROUP;
+       if (!dev->ethtool_ops)
+               dev->ethtool_ops = &default_ethtool_ops;
        return dev;
 
 free_all:
@@ -6214,7 +6252,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               if (dev_get_valid_name(dev, pat) < 0)
+               if (dev_get_valid_name(net, dev, pat) < 0)
                        goto out;
        }
 
@@ -6242,7 +6280,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
           the device is just moving and can keep their slaves up.
        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-       call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+       rcu_barrier();
+       call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 
        /*
index c4cc2bc49f06d4041fcbe1cbf3759933ed31c1f4..87cc17db2d566e5846601d6eb03ba38ce64b2ddb 100644 (file)
@@ -22,7 +22,7 @@
  */
 
 static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
-                              unsigned char *addr, int addr_len,
+                              const unsigned char *addr, int addr_len,
                               unsigned char addr_type, bool global)
 {
        struct netdev_hw_addr *ha;
@@ -46,7 +46,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
 }
 
 static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
-                           unsigned char *addr, int addr_len,
+                           const unsigned char *addr, int addr_len,
                            unsigned char addr_type, bool global)
 {
        struct netdev_hw_addr *ha;
@@ -72,14 +72,15 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
        return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
 }
 
-static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
-                        int addr_len, unsigned char addr_type)
+static int __hw_addr_add(struct netdev_hw_addr_list *list,
+                        const unsigned char *addr, int addr_len,
+                        unsigned char addr_type)
 {
        return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
 }
 
 static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
-                           unsigned char *addr, int addr_len,
+                           const unsigned char *addr, int addr_len,
                            unsigned char addr_type, bool global)
 {
        struct netdev_hw_addr *ha;
@@ -104,8 +105,9 @@ static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
        return -ENOENT;
 }
 
-static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
-                        int addr_len, unsigned char addr_type)
+static int __hw_addr_del(struct netdev_hw_addr_list *list,
+                        const unsigned char *addr, int addr_len,
+                        unsigned char addr_type)
 {
        return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
 }
@@ -278,7 +280,7 @@ EXPORT_SYMBOL(dev_addr_init);
  *
  *     The caller must hold the rtnl_mutex.
  */
-int dev_addr_add(struct net_device *dev, unsigned char *addr,
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type)
 {
        int err;
@@ -303,7 +305,7 @@ EXPORT_SYMBOL(dev_addr_add);
  *
  *     The caller must hold the rtnl_mutex.
  */
-int dev_addr_del(struct net_device *dev, unsigned char *addr,
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type)
 {
        int err;
@@ -390,7 +392,7 @@ EXPORT_SYMBOL(dev_addr_del_multiple);
  *     @dev: device
  *     @addr: address to add
  */
-int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
 {
        struct netdev_hw_addr *ha;
        int err;
@@ -421,7 +423,7 @@ EXPORT_SYMBOL(dev_uc_add_excl);
  *     Add a secondary unicast address to the device or increase
  *     the reference count if it already exists.
  */
-int dev_uc_add(struct net_device *dev, unsigned char *addr)
+int dev_uc_add(struct net_device *dev, const unsigned char *addr)
 {
        int err;
 
@@ -443,7 +445,7 @@ EXPORT_SYMBOL(dev_uc_add);
  *     Release reference to a secondary unicast address and remove it
  *     from the device if the reference count drops to zero.
  */
-int dev_uc_del(struct net_device *dev, unsigned char *addr)
+int dev_uc_del(struct net_device *dev, const unsigned char *addr)
 {
        int err;
 
@@ -543,7 +545,7 @@ EXPORT_SYMBOL(dev_uc_init);
  *     @dev: device
  *     @addr: address to add
  */
-int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
 {
        struct netdev_hw_addr *ha;
        int err;
@@ -566,7 +568,7 @@ out:
 }
 EXPORT_SYMBOL(dev_mc_add_excl);
 
-static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
+static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
                        bool global)
 {
        int err;
@@ -587,7 +589,7 @@ static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
  *     Add a multicast address to the device or increase
  *     the reference count if it already exists.
  */
-int dev_mc_add(struct net_device *dev, unsigned char *addr)
+int dev_mc_add(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_add(dev, addr, false);
 }
@@ -600,13 +602,13 @@ EXPORT_SYMBOL(dev_mc_add);
  *
  *     Add a global multicast address to the device.
  */
-int dev_mc_add_global(struct net_device *dev, unsigned char *addr)
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_add(dev, addr, true);
 }
 EXPORT_SYMBOL(dev_mc_add_global);
 
-static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
+static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
                        bool global)
 {
        int err;
@@ -628,7 +630,7 @@ static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
  *     Release reference to a multicast address and remove it
  *     from the device if the reference count drops to zero.
  */
-int dev_mc_del(struct net_device *dev, unsigned char *addr)
+int dev_mc_del(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_del(dev, addr, false);
 }
@@ -642,7 +644,7 @@ EXPORT_SYMBOL(dev_mc_del);
  *     Release reference to a multicast address and remove it
  *     from the device if the reference count drops to zero.
  */
-int dev_mc_del_global(struct net_device *dev, unsigned char *addr)
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_del(dev, addr, true);
 }
index b8d7c700541d71488e9e3bd5553c9fcedab7f910..ee6153e2cf43b83abb51e96620d482e820560ca9 100644 (file)
@@ -374,7 +374,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
        struct dst_entry *dst, *last = NULL;
 
        switch (event) {
-       case NETDEV_UNREGISTER:
+       case NETDEV_UNREGISTER_FINAL:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
index cbf033dcaf1feb8b2cfc2610dad581e89bcdb061..4d64cc2e3fa9bf1246ea3504f582616f98060bf1 100644 (file)
@@ -1426,18 +1426,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
                return -EFAULT;
 
-       if (!dev->ethtool_ops) {
-               /* A few commands do not require any driver support,
-                * are unprivileged, and do not change anything, so we
-                * can take a shortcut to them. */
-               if (ethcmd == ETHTOOL_GDRVINFO)
-                       return ethtool_get_drvinfo(dev, useraddr);
-               else if (ethcmd == ETHTOOL_GET_TS_INFO)
-                       return ethtool_get_ts_info(dev, useraddr);
-               else
-                       return -EOPNOTSUPP;
-       }
-
        /* Allow some commands to be done by anyone */
        switch (ethcmd) {
        case ETHTOOL_GSET:
index ab7db83236c96fa2b4746ffc25717f24db671e17..58a4ba27dfe3117d439ada122000e1339a23f48f 100644 (file)
@@ -402,7 +402,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (unresolved)
                ops->unresolved_rules++;
 
-       notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
+       notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;
@@ -500,7 +500,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                }
 
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
-                                  NETLINK_CB(skb).pid);
+                                  NETLINK_CB(skb).portid);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
@@ -601,7 +601,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                if (idx < cb->args[1])
                        goto skip;
 
-               if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
+               if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
                                     cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                     NLM_F_MULTI, ops) < 0)
                        break;
index 907efd27ec77bcf5f3f214058dce27fc77e873bd..3d92ebb7fbcf71471d4b3d90e5309f459c47bea0 100644 (file)
@@ -167,6 +167,14 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
                case BPF_S_ALU_DIV_K:
                        A = reciprocal_divide(A, K);
                        continue;
+               case BPF_S_ALU_MOD_X:
+                       if (X == 0)
+                               return 0;
+                       A %= X;
+                       continue;
+               case BPF_S_ALU_MOD_K:
+                       A %= K;
+                       continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
@@ -179,6 +187,13 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
                case BPF_S_ALU_OR_K:
                        A |= K;
                        continue;
+               case BPF_S_ANC_ALU_XOR_X:
+               case BPF_S_ALU_XOR_X:
+                       A ^= X;
+                       continue;
+               case BPF_S_ALU_XOR_K:
+                       A ^= K;
+                       continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
@@ -326,9 +341,6 @@ load_b:
                case BPF_S_ANC_CPU:
                        A = raw_smp_processor_id();
                        continue;
-               case BPF_S_ANC_ALU_XOR_X:
-                       A ^= X;
-                       continue;
                case BPF_S_ANC_NLATTR: {
                        struct nlattr *nla;
 
@@ -469,10 +481,14 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
                [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
                [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
                [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
+               [BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
+               [BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
                [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
                [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
                [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
                [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
+               [BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
+               [BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
                [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
                [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
                [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
@@ -531,6 +547,11 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
                                return -EINVAL;
                        ftest->k = reciprocal_value(ftest->k);
                        break;
+               case BPF_S_ALU_MOD_K:
+                       /* check for division by zero */
+                       if (ftest->k == 0)
+                               return -EINVAL;
+                       break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                case BPF_S_ST:
index 8e397a69005afd51726a2310e735416b31d665d3..8f82a5cc3851d3a61953a6cad926c9040bc770a7 100644 (file)
@@ -76,6 +76,14 @@ static void rfc2863_policy(struct net_device *dev)
 }
 
 
+void linkwatch_init_dev(struct net_device *dev)
+{
+       /* Handle pre-registration link state changes */
+       if (!netif_carrier_ok(dev) || netif_dormant(dev))
+               rfc2863_policy(dev);
+}
+
+
 static bool linkwatch_urgent_event(struct net_device *dev)
 {
        if (!netif_running(dev))
index 112c6e2266e90f67630b849a5e029ded20601a08..baca771caae2df503a1b0f1bcc9a4fcb577777e8 100644 (file)
@@ -2102,7 +2102,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                if (tidx < tbl_skip || (family && tbl->family != family))
                        continue;
 
-               if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
+               if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
                                       NLM_F_MULTI) <= 0)
                        break;
@@ -2115,7 +2115,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                                goto next;
 
                        if (neightbl_fill_param_info(skb, tbl, p,
-                                                    NETLINK_CB(cb->skb).pid,
+                                                    NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGHTBL,
                                                     NLM_F_MULTI) <= 0)
@@ -2244,7 +2244,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                                continue;
                        if (idx < s_idx)
                                goto next;
-                       if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+                       if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI) <= 0) {
@@ -2281,7 +2281,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                                continue;
                        if (idx < s_idx)
                                goto next;
-                       if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+                       if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI, tbl) <= 0) {
index 72607174ea5a4af158855f588b767b5c4c86d973..bcf02f608cbfa76ad06da490f2dbf423e728fc17 100644 (file)
@@ -166,9 +166,21 @@ static ssize_t show_duplex(struct device *dev,
 
        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
-               if (!__ethtool_get_settings(netdev, &cmd))
-                       ret = sprintf(buf, "%s\n",
-                                     cmd.duplex ? "full" : "half");
+               if (!__ethtool_get_settings(netdev, &cmd)) {
+                       const char *duplex;
+                       switch (cmd.duplex) {
+                       case DUPLEX_HALF:
+                               duplex = "half";
+                               break;
+                       case DUPLEX_FULL:
+                               duplex = "full";
+                               break;
+                       default:
+                               duplex = "unknown";
+                               break;
+                       }
+                       ret = sprintf(buf, "%s\n", duplex);
+               }
        }
        rtnl_unlock();
        return ret;
index e4ba3e70c1747684ad480815f67b2410e87974a7..77a0388fc3beccbf39187e357c7a1b4c93f09d73 100644 (file)
@@ -328,7 +328,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
 
-               txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+               txq = netdev_pick_tx(dev, skb);
 
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -380,6 +380,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
+       static atomic_t ip_ident;
 
        udp_len = len + sizeof(*udph);
        ip_len = udp_len + sizeof(*iph);
@@ -415,7 +416,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
-       iph->id       = 0;
+       iph->id       = htons(atomic_inc_return(&ip_ident));
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
index 39e7e4d3cdb430e6c35fbe505fdcb32c710159f1..4a83fb3c8e8770b63b8381eb7d9dc3888da885c5 100644 (file)
@@ -73,7 +73,6 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len)
                           ((sizeof(u32) * new_len));
        struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
        struct netprio_map *old_priomap;
-       int i;
 
        old_priomap  = rtnl_dereference(dev->priomap);
 
@@ -82,10 +81,10 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len)
                return -ENOMEM;
        }
 
-       for (i = 0;
-            old_priomap && (i < old_priomap->priomap_len);
-            i++)
-               new_priomap->priomap[i] = old_priomap->priomap[i];
+       if (old_priomap)
+               memcpy(new_priomap->priomap, old_priomap->priomap,
+                      old_priomap->priomap_len *
+                      sizeof(old_priomap->priomap[0]));
 
        new_priomap->priomap_len = new_len;
 
@@ -109,32 +108,6 @@ static int write_update_netdev_table(struct net_device *dev)
        return ret;
 }
 
-static int update_netdev_tables(void)
-{
-       int ret = 0;
-       struct net_device *dev;
-       u32 max_len;
-       struct netprio_map *map;
-
-       rtnl_lock();
-       max_len = atomic_read(&max_prioidx) + 1;
-       for_each_netdev(&init_net, dev) {
-               map = rtnl_dereference(dev->priomap);
-               /*
-                * don't allocate priomap if we didn't
-                * change net_prio.ifpriomap (map == NULL),
-                * this will speed up skb_update_prio.
-                */
-               if (map && map->priomap_len < max_len) {
-                       ret = extend_netdev_table(dev, max_len);
-                       if (ret < 0)
-                               break;
-               }
-       }
-       rtnl_unlock();
-       return ret;
-}
-
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
        struct cgroup_netprio_state *cs;
@@ -153,12 +126,6 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
                goto out;
        }
 
-       ret = update_netdev_tables();
-       if (ret < 0) {
-               put_prioidx(cs->prioidx);
-               goto out;
-       }
-
        return &cs->css;
 out:
        kfree(cs);
index 9b570a6a33c5d8c52d777e160742dc31ec350c16..c31d9e8668c30346894adbf3be55eed4beeb1258 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/tcp.h>
 #include <linux/vmalloc.h>
 
 #include <net/request_sock.h>
@@ -130,3 +131,97 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                kfree(lopt);
 }
 
+/*
+ * This function is called to set a Fast Open socket's "fastopen_rsk" field
+ * to NULL when a TFO socket no longer needs to access the request_sock.
+ * This happens only after 3WHS has been either completed or aborted (e.g.,
+ * RST is received).
+ *
+ * Before TFO, a child socket is created only after 3WHS is completed,
+ * hence it never needs to access the request_sock. Things get a lot more
+ * complex with TFO. A child socket, accepted or not, has to access its
+ * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
+ * until 3WHS is either completed or aborted. Afterwards the req will stay
+ * until either the child socket is accepted, or in the rare case when the
+ * listener is closed before the child is accepted.
+ *
+ * In short, a request socket is only freed after BOTH 3WHS has completed
+ * (or aborted) and the child socket has been accepted (or listener closed).
+ * When a child socket is accepted, its corresponding req->sk is set to
+ * NULL since it's no longer needed. More importantly, "req->sk == NULL"
+ * will be used by the code below to determine if a child socket has been
+ * accepted or not, and the check is protected by the fastopenq->lock
+ * described below.
+ *
+ * Note that fastopen_rsk is only accessed from the child socket's context
+ * with its socket lock held. But a request_sock (req) can be accessed by
+ * both its child socket through fastopen_rsk, and a listener socket through
+ * icsk_accept_queue.rskq_accept_head. To protect the access a simple spin
+ * lock per listener "icsk->icsk_accept_queue.fastopenq->lock" is created.
+ * only in the rare case when both the listener and the child locks are held,
+ * e.g., in inet_csk_listen_stop() do we not need to acquire the lock.
+ * The lock also protects other fields such as fastopenq->qlen, which is
+ * decremented by this function when fastopen_rsk is no longer needed.
+ *
+ * Note that another solution was to simply use the existing socket lock
+ * from the listener. But first socket lock is difficult to use. It is not
+ * a simple spin lock - one must consider sock_owned_by_user() and arrange
+ * to use sk_add_backlog() stuff. But what really makes it infeasible is the
+ * locking hierarchy violation. E.g., inet_csk_listen_stop() may try to
+ * acquire a child's lock while holding listener's socket lock. A corner
+ * case might also exist in tcp_v4_hnd_req() that will trigger this locking
+ * order.
+ *
+ * When a TFO req is created, it needs to sock_hold its listener to prevent
+ * the latter data structure from going away.
+ *
+ * This function also sets "treq->listener" to NULL and unreferences the listener
+ * socket. treq->listener is used by the listener so it is protected by the
+ * fastopenq->lock in this function.
+ */
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+                          bool reset)
+{
+       struct sock *lsk = tcp_rsk(req)->listener;
+       struct fastopen_queue *fastopenq =
+           inet_csk(lsk)->icsk_accept_queue.fastopenq;
+
+       BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
+
+       tcp_sk(sk)->fastopen_rsk = NULL;
+       spin_lock_bh(&fastopenq->lock);
+       fastopenq->qlen--;
+       tcp_rsk(req)->listener = NULL;
+       if (req->sk)    /* the child socket hasn't been accepted yet */
+               goto out;
+
+       if (!reset || lsk->sk_state != TCP_LISTEN) {
+               /* If the listener has been closed don't bother with the
+                * special RST handling below.
+                */
+               spin_unlock_bh(&fastopenq->lock);
+               sock_put(lsk);
+               reqsk_free(req);
+               return;
+       }
+       /* Wait for 60secs before removing a req that has triggered RST.
+        * This is a simple defense against TFO spoofing attack - by
+        * counting the req against fastopen.max_qlen, and disabling
+        * TFO when the qlen exceeds max_qlen.
+        *
+        * For more details see CoNext'11 "TCP Fast Open" paper.
+        */
+       req->expires = jiffies + 60*HZ;
+       if (fastopenq->rskq_rst_head == NULL)
+               fastopenq->rskq_rst_head = req;
+       else
+               fastopenq->rskq_rst_tail->dl_next = req;
+
+       req->dl_next = NULL;
+       fastopenq->rskq_rst_tail = req;
+       fastopenq->qlen++;
+out:
+       spin_unlock_bh(&fastopenq->lock);
+       sock_put(lsk);
+       return;
+}
index 2c5a0a06c4ce3053a4a6c6afa3437f3a47f2d08c..76d4c2c3c89b9d170e89c39fbf0ed5729621a0bc 100644 (file)
@@ -618,7 +618,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
                       long expires, u32 error)
 {
        struct rta_cacheinfo ci = {
-               .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+               .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
                .rta_used = dst->__use,
                .rta_clntref = atomic_read(&(dst->__refcnt)),
                .rta_error = error,
@@ -1081,7 +1081,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        if (idx < s_idx)
                                goto cont;
                        if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-                                            NETLINK_CB(cb->skb).pid,
+                                            NETLINK_CB(cb->skb).portid,
                                             cb->nlh->nlmsg_seq, 0,
                                             NLM_F_MULTI,
                                             ext_filter_mask) <= 0)
@@ -1812,8 +1812,6 @@ replay:
                        return -ENODEV;
                }
 
-               if (ifm->ifi_index)
-                       return -EOPNOTSUPP;
                if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
                        return -EOPNOTSUPP;
 
@@ -1839,10 +1837,14 @@ replay:
                        return PTR_ERR(dest_net);
 
                dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
-
-               if (IS_ERR(dev))
+               if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
-               else if (ops->newlink)
+                       goto out;
+               }
+
+               dev->ifindex = ifm->ifi_index;
+
+               if (ops->newlink)
                        err = ops->newlink(net, dev, tb, data);
                else
                        err = register_netdevice(dev);
@@ -1897,14 +1899,14 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (nskb == NULL)
                return -ENOBUFS;
 
-       err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
+       err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
                               nlh->nlmsg_seq, 0, 0, ext_filter_mask);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(nskb);
        } else
-               err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
+               err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
 
        return err;
 }
@@ -2088,7 +2090,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
            (dev->priv_flags & IFF_BRIDGE_PORT)) {
                master = dev->master;
-               err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+               err = master->netdev_ops->ndo_fdb_add(ndm, tb,
+                                                     dev, addr,
                                                      nlh->nlmsg_flags);
                if (err)
                        goto out;
@@ -2098,7 +2101,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        /* Embedded bridge, macvlan, and any other device support */
        if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
-               err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+               err = dev->netdev_ops->ndo_fdb_add(ndm, tb,
+                                                  dev, addr,
                                                   nlh->nlmsg_flags);
 
                if (!err) {
@@ -2178,9 +2182,9 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
 {
        struct netdev_hw_addr *ha;
        int err;
-       u32 pid, seq;
+       u32 portid, seq;
 
-       pid = NETLINK_CB(cb->skb).pid;
+       portid = NETLINK_CB(cb->skb).portid;
        seq = cb->nlh->nlmsg_seq;
 
        list_for_each_entry(ha, &list->list, list) {
@@ -2188,7 +2192,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
                        goto skip;
 
                err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
-                                             pid, seq, 0, NTF_SELF);
+                                             portid, seq, 0, NTF_SELF);
                if (err < 0)
                        return err;
 skip:
@@ -2356,7 +2360,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
        case NETDEV_PRE_TYPE_CHANGE:
        case NETDEV_GOING_DOWN:
        case NETDEV_UNREGISTER:
-       case NETDEV_UNREGISTER_BATCH:
+       case NETDEV_UNREGISTER_FINAL:
        case NETDEV_RELEASE:
        case NETDEV_JOIN:
                break;
@@ -2379,9 +2383,10 @@ static int __net_init rtnetlink_net_init(struct net *net)
                .groups         = RTNLGRP_MAX,
                .input          = rtnetlink_rcv,
                .cb_mutex       = &rtnl_mutex,
+               .flags          = NL_CFG_F_NONROOT_RECV,
        };
 
-       sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg);
+       sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
        if (!sk)
                return -ENOMEM;
        net->rtnl = sk;
@@ -2414,7 +2419,6 @@ void __init rtnetlink_init(void)
        if (register_pernet_subsys(&rtnetlink_net_ops))
                panic("rtnetlink_init: cannot initialize rtnetlink\n");
 
-       netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
        register_netdevice_notifier(&rtnetlink_dev_notifier);
 
        rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
index 6ab491d6c26f43d84da26e9e0fc90a477ab49cea..9c1c63da3ca8a99ce8ac25e8f1abf9e309075740 100644 (file)
@@ -155,19 +155,21 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
                        break;
                case SCM_CREDENTIALS:
                {
+                       struct ucred creds;
                        kuid_t uid;
                        kgid_t gid;
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
                                goto error;
-                       memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
-                       err = scm_check_creds(&p->creds);
+                       memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
+                       err = scm_check_creds(&creds);
                        if (err)
                                goto error;
 
-                       if (!p->pid || pid_vnr(p->pid) != p->creds.pid) {
+                       p->creds.pid = creds.pid;
+                       if (!p->pid || pid_vnr(p->pid) != creds.pid) {
                                struct pid *pid;
                                err = -ESRCH;
-                               pid = find_get_pid(p->creds.pid);
+                               pid = find_get_pid(creds.pid);
                                if (!pid)
                                        goto error;
                                put_pid(p->pid);
@@ -175,11 +177,14 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
                        }
 
                        err = -EINVAL;
-                       uid = make_kuid(current_user_ns(), p->creds.uid);
-                       gid = make_kgid(current_user_ns(), p->creds.gid);
+                       uid = make_kuid(current_user_ns(), creds.uid);
+                       gid = make_kgid(current_user_ns(), creds.gid);
                        if (!uid_valid(uid) || !gid_valid(gid))
                                goto error;
 
+                       p->creds.uid = uid;
+                       p->creds.gid = gid;
+
                        if (!p->cred ||
                            !uid_eq(p->cred->euid, uid) ||
                            !gid_eq(p->cred->egid, gid)) {
index 99b2596531bbc2a9a714bf262a8c79c61f1726b5..e61a8bb7fce73393ddaab88747a9637c1dcbf311 100644 (file)
@@ -76,6 +76,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
 
        return hash[0];
 }
+EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
index e33ebae519c8c3283dadde917cdf46d413229a54..cdc28598f4efadb83b79a006a71ea676b508a655 100644 (file)
@@ -340,43 +340,57 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 EXPORT_SYMBOL(build_skb);
 
 struct netdev_alloc_cache {
-       struct page *page;
-       unsigned int offset;
-       unsigned int pagecnt_bias;
+       struct page_frag        frag;
+       /* we maintain a pagecount bias, so that we don't dirty the cache line
+        * containing page->_count every time we allocate a fragment.
+        */
+       unsigned int            pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
 
-#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
+#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
+#define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
        struct netdev_alloc_cache *nc;
        void *data = NULL;
+       int order;
        unsigned long flags;
 
        local_irq_save(flags);
        nc = &__get_cpu_var(netdev_alloc_cache);
-       if (unlikely(!nc->page)) {
+       if (unlikely(!nc->frag.page)) {
 refill:
-               nc->page = alloc_page(gfp_mask);
-               if (unlikely(!nc->page))
-                       goto end;
+               for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
+                       gfp_t gfp = gfp_mask;
+
+                       if (order)
+                               gfp |= __GFP_COMP | __GFP_NOWARN;
+                       nc->frag.page = alloc_pages(gfp, order);
+                       if (likely(nc->frag.page))
+                               break;
+                       if (--order < 0)
+                               goto end;
+               }
+               nc->frag.size = PAGE_SIZE << order;
 recycle:
-               atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
-               nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
-               nc->offset = 0;
+               atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+               nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+               nc->frag.offset = 0;
        }
 
-       if (nc->offset + fragsz > PAGE_SIZE) {
+       if (nc->frag.offset + fragsz > nc->frag.size) {
                /* avoid unnecessary locked operations if possible */
-               if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
-                   atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
+               if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
+                   atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
                        goto recycle;
                goto refill;
        }
 
-       data = page_address(nc->page) + nc->offset;
-       nc->offset += fragsz;
+       data = page_address(nc->frag.page) + nc->frag.offset;
+       nc->frag.offset += fragsz;
        nc->pagecnt_bias--;
 end:
        local_irq_restore(flags);
@@ -1655,38 +1669,19 @@ static struct page *linear_to_page(struct page *page, unsigned int *len,
                                   unsigned int *offset,
                                   struct sk_buff *skb, struct sock *sk)
 {
-       struct page *p = sk->sk_sndmsg_page;
-       unsigned int off;
-
-       if (!p) {
-new_page:
-               p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
-               if (!p)
-                       return NULL;
+       struct page_frag *pfrag = sk_page_frag(sk);
 
-               off = sk->sk_sndmsg_off = 0;
-               /* hold one ref to this page until it's full */
-       } else {
-               unsigned int mlen;
-
-               /* If we are the only user of the page, we can reset offset */
-               if (page_count(p) == 1)
-                       sk->sk_sndmsg_off = 0;
-               off = sk->sk_sndmsg_off;
-               mlen = PAGE_SIZE - off;
-               if (mlen < 64 && mlen < *len) {
-                       put_page(p);
-                       goto new_page;
-               }
+       if (!sk_page_frag_refill(sk, pfrag))
+               return NULL;
 
-               *len = min_t(unsigned int, *len, mlen);
-       }
+       *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
 
-       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
-       sk->sk_sndmsg_off += *len;
-       *offset = off;
+       memcpy(page_address(pfrag->page) + pfrag->offset,
+              page_address(page) + *offset, *len);
+       *offset = pfrag->offset;
+       pfrag->offset += *len;
 
-       return p;
+       return pfrag->page;
 }
 
 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
@@ -3488,8 +3483,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
                        return false;
 
-               delta = from->truesize -
-                       SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+               delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
        }
 
        WARN_ON_ONCE(delta < len);
index 12cddd037bce296b1b38d60b8eb073f77d050108..8a146cfcc366fbead42d31d882893a74ec8dcf4e 100644 (file)
@@ -1221,7 +1221,7 @@ void sock_update_classid(struct sock *sk)
        rcu_read_lock();  /* doing current task, which cannot vanish. */
        classid = task_cls_classid(current);
        rcu_read_unlock();
-       if (classid && classid != sk->sk_classid)
+       if (classid != sk->sk_classid)
                sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
@@ -1458,19 +1458,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
 
-void __init sk_init(void)
-{
-       if (totalram_pages <= 4096) {
-               sysctl_wmem_max = 32767;
-               sysctl_rmem_max = 32767;
-               sysctl_wmem_default = 32767;
-               sysctl_rmem_default = 32767;
-       } else if (totalram_pages >= 131072) {
-               sysctl_wmem_max = 131071;
-               sysctl_rmem_max = 131071;
-       }
-}
-
 /*
  *     Simple resource managers for sockets.
  */
@@ -1738,6 +1725,45 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
+/* On 32bit arches, an skb frag is limited to 2^15 */
+#define SKB_FRAG_PAGE_ORDER    get_order(32768)
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+       int order;
+
+       if (pfrag->page) {
+               if (atomic_read(&pfrag->page->_count) == 1) {
+                       pfrag->offset = 0;
+                       return true;
+               }
+               if (pfrag->offset < pfrag->size)
+                       return true;
+               put_page(pfrag->page);
+       }
+
+       /* We restrict high order allocations to users that can afford to wait */
+       order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+
+       do {
+               gfp_t gfp = sk->sk_allocation;
+
+               if (order)
+                       gfp |= __GFP_COMP | __GFP_NOWARN;
+               pfrag->page = alloc_pages(gfp, order);
+               if (likely(pfrag->page)) {
+                       pfrag->offset = 0;
+                       pfrag->size = PAGE_SIZE << order;
+                       return true;
+               }
+       } while (--order >= 0);
+
+       sk_enter_memory_pressure(sk);
+       sk_stream_moderate_sndbuf(sk);
+       return false;
+}
+EXPORT_SYMBOL(sk_page_frag_refill);
+
 static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
@@ -2167,8 +2193,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_error_report     =       sock_def_error_report;
        sk->sk_destruct         =       sock_def_destruct;
 
-       sk->sk_sndmsg_page      =       NULL;
-       sk->sk_sndmsg_off       =       0;
+       sk->sk_frag.page        =       NULL;
+       sk->sk_frag.offset      =       0;
        sk->sk_peek_off         =       -1;
 
        sk->sk_peer_pid         =       NULL;
@@ -2411,6 +2437,12 @@ void sk_common_release(struct sock *sk)
        xfrm_sk_free_policy(sk);
 
        sk_refcnt_debug_release(sk);
+
+       if (sk->sk_frag.page) {
+               put_page(sk->sk_frag.page);
+               sk->sk_frag.page = NULL;
+       }
+
        sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);
index 9d8755e4a7a51e8415818f6c3542ef56c0697c08..602cd637182ebb321af6773d2ccfe9a8945d44c5 100644 (file)
@@ -172,8 +172,7 @@ static int __net_init diag_net_init(struct net *net)
                .input  = sock_diag_rcv,
        };
 
-       net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG,
-                                              THIS_MODULE, &cfg);
+       net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
        return net->diag_nlsk == NULL ? -ENOMEM : 0;
 }
 
index 39895a65e54ae59d35144c656c37483b43efcf87..f5613d569c23a17a806d8579d08a3b103735a84a 100644 (file)
@@ -294,6 +294,26 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
 
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+                              const __be32 *from, const __be32 *to,
+                              int pseudohdr)
+{
+       __be32 diff[] = {
+               ~from[0], ~from[1], ~from[2], ~from[3],
+               to[0], to[1], to[2], to[3],
+       };
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               *sum = csum_fold(csum_partial(diff, sizeof(diff),
+                                ~csum_unfold(*sum)));
+               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+                       skb->csum = ~csum_partial(diff, sizeof(diff),
+                                                 ~skb->csum);
+       } else if (pseudohdr)
+               *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
+                                 csum_unfold(*sum)));
+}
+EXPORT_SYMBOL(inet_proto_csum_replace16);
+
 int mac_pton(const char *s, u8 *mac)
 {
        int i;
index 81f2bb62dea3a7fdd569636fa8649350eb264a35..70989e672304938a39cdf7b7a0dfd87b7497d2a9 100644 (file)
@@ -1319,7 +1319,7 @@ nla_put_failure:
 }
 
 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
-                       u32 seq, u32 pid, int dcbx_ver)
+                       u32 seq, u32 portid, int dcbx_ver)
 {
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
@@ -1330,7 +1330,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
        if (!ops)
                return -EOPNOTSUPP;
 
-       skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
+       skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
        if (!skb)
                return -ENOBUFS;
 
@@ -1353,16 +1353,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
 }
 
 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
-                     u32 seq, u32 pid)
+                     u32 seq, u32 portid)
 {
-       return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
+       return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
 }
 EXPORT_SYMBOL(dcbnl_ieee_notify);
 
 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
-                    u32 seq, u32 pid)
+                    u32 seq, u32 portid)
 {
-       return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
+       return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
 }
 EXPORT_SYMBOL(dcbnl_cee_notify);
 
@@ -1656,7 +1656,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        struct net_device *netdev;
        struct dcbmsg *dcb = nlmsg_data(nlh);
        struct nlattr *tb[DCB_ATTR_MAX + 1];
-       u32 pid = skb ? NETLINK_CB(skb).pid : 0;
+       u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = -EINVAL;
        struct sk_buff *reply_skb;
        struct nlmsghdr *reply_nlh = NULL;
@@ -1690,7 +1690,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                goto out;
        }
 
-       reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
+       reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
                                 nlh->nlmsg_flags, &reply_nlh);
        if (!reply_skb) {
                ret = -ENOBUFS;
@@ -1705,7 +1705,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        nlmsg_end(reply_skb, reply_nlh);
 
-       ret = rtnl_unicast(reply_skb, &init_net, pid);
+       ret = rtnl_unicast(reply_skb, &init_net, portid);
 out:
        dev_put(netdev);
        return ret;
index 2ba1a2814c24e2260592c985706c5cbc58e45fdc..307c322d53bb889540aa3d302a69b13646f0cda3 100644 (file)
@@ -1313,10 +1313,10 @@ static int dn_shutdown(struct socket *sock, int how)
        if (scp->state == DN_O)
                goto out;
 
-       if (how != SHUTDOWN_MASK)
+       if (how != SHUT_RDWR)
                goto out;
 
-       sk->sk_shutdown = how;
+       sk->sk_shutdown = SHUTDOWN_MASK;
        dn_destroy_sock(sk);
        err = 0;
 
index f3924ab1e019f5efc22f6278021078889f43ce52..7b7e561412d379380a54678b1505da796184c49c 100644 (file)
@@ -667,12 +667,12 @@ static inline size_t dn_ifaddr_nlmsg_size(void)
 }
 
 static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
-                            u32 pid, u32 seq, int event, unsigned int flags)
+                            u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct ifaddrmsg *ifm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -753,7 +753,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                        if (dn_idx < skip_naddr)
                                continue;
 
-                       if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
+                       if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq, RTM_NEWADDR,
                                              NLM_F_MULTI) < 0)
                                goto done;
index 85a3604c87c8d2cac6cac4d7049d9ba75877bf66..b57419cc41a486b3ab2ddc82643169c97b19d132 100644 (file)
@@ -961,7 +961,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
                .saddr = oldflp->saddr,
                .flowidn_scope = RT_SCOPE_UNIVERSE,
                .flowidn_mark = oldflp->flowidn_mark,
-               .flowidn_iif = init_net.loopback_dev->ifindex,
+               .flowidn_iif = LOOPBACK_IFINDEX,
                .flowidn_oif = oldflp->flowidn_oif,
        };
        struct dn_route *rt = NULL;
@@ -979,7 +979,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
                       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
                       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
                       le16_to_cpu(oldflp->saddr),
-                      oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
+                      oldflp->flowidn_mark, LOOPBACK_IFINDEX,
                       oldflp->flowidn_oif);
 
        /* If we have an output interface, verify its a DECnet device */
@@ -1042,7 +1042,7 @@ source_ok:
                        if (!fld.daddr)
                                goto out;
                }
-               fld.flowidn_oif = init_net.loopback_dev->ifindex;
+               fld.flowidn_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                goto make_route;
        }
@@ -1543,7 +1543,7 @@ static int dn_route_input(struct sk_buff *skb)
        return dn_route_input_slow(skb);
 }
 
-static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                           int event, int nowait, unsigned int flags)
 {
        struct dn_route *rt = (struct dn_route *)skb_dst(skb);
@@ -1551,7 +1551,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        struct nlmsghdr *nlh;
        long expires;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -1685,7 +1685,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
        if (rtm->rtm_flags & RTM_F_NOTIFY)
                rt->rt_flags |= RTCF_NOTIFY;
 
-       err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
+       err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
 
        if (err == 0)
                goto out_free;
@@ -1694,7 +1694,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
                goto out_free;
        }
 
-       return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
+       return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);
 
 out_free:
        kfree_skb(skb);
@@ -1737,7 +1737,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        if (idx < s_idx)
                                continue;
                        skb_dst_set(skb, dst_clone(&rt->dst));
-                       if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
+                       if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_NEWROUTE,
                                        1, NLM_F_MULTI) <= 0) {
                                skb_dst_drop(skb);
index 16c986ab1228ec7f5cf5821253938705b8b6a808..f968c1b58f47d1892ee392eea366d67f4c687397 100644 (file)
@@ -291,14 +291,14 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
        return payload;
 }
 
-static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
+static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
                        struct dn_fib_info *fi, unsigned int flags)
 {
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -374,14 +374,14 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
                        struct nlmsghdr *nlh, struct netlink_skb_parms *req)
 {
        struct sk_buff *skb;
-       u32 pid = req ? req->pid : 0;
+       u32 portid = req ? req->portid : 0;
        int err = -ENOBUFS;
 
        skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
        if (skb == NULL)
                goto errout;
 
-       err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
+       err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
                               f->fn_type, f->fn_scope, &f->fn_key, z,
                               DN_FIB_INFO(f), 0);
        if (err < 0) {
@@ -390,7 +390,7 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, &init_net, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
+       rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
        return;
 errout:
        if (err < 0)
@@ -411,7 +411,7 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
                        continue;
                if (f->fn_state & DN_S_ZOMBIE)
                        continue;
-               if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
+               if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq,
                                RTM_NEWROUTE,
                                tb->n,
index 11db0ecf342ff2f32901acdb24836055ca0e383c..dfe42012a044142dbc9c0bf6197f539fd1996a8b 100644 (file)
@@ -130,8 +130,7 @@ static int __init dn_rtmsg_init(void)
                .input  = dnrmg_receive_user_skb,
        };
 
-       dnrmg = netlink_kernel_create(&init_net,
-                                     NETLINK_DNRTMSG, THIS_MODULE, &cfg);
+       dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, &cfg);
        if (dnrmg == NULL) {
                printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
                return -ENOMEM;
index 6a095225148e929bc5e3b8d7de2731229cc5f53c..6d42c17af96b4a606a545d2d72ad5e32cb53c3da 100644 (file)
@@ -1063,12 +1063,6 @@ out:
        return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
 }
 
-static void lowpan_dev_free(struct net_device *dev)
-{
-       dev_put(lowpan_dev_info(dev)->real_dev);
-       free_netdev(dev);
-}
-
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
@@ -1118,7 +1112,7 @@ static void lowpan_setup(struct net_device *dev)
        dev->netdev_ops         = &lowpan_netdev_ops;
        dev->header_ops         = &lowpan_header_ops;
        dev->ml_priv            = &lowpan_mlme;
-       dev->destructor         = lowpan_dev_free;
+       dev->destructor         = free_netdev;
 }
 
 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1133,6 +1127,8 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev)
 {
+       struct sk_buff *local_skb;
+
        if (!netif_running(dev))
                goto drop;
 
@@ -1144,7 +1140,12 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
        case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
        case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
-               lowpan_process_data(skb);
+               local_skb = skb_clone(skb, GFP_ATOMIC);
+               if (!local_skb)
+                       goto drop;
+               lowpan_process_data(local_skb);
+
+               kfree_skb(skb);
                break;
        default:
                break;
@@ -1237,6 +1238,34 @@ static inline void __init lowpan_netlink_fini(void)
        rtnl_link_unregister(&lowpan_link_ops);
 }
 
+static int lowpan_device_event(struct notifier_block *unused,
+                               unsigned long event,
+                               void *ptr)
+{
+       struct net_device *dev = ptr;
+       LIST_HEAD(del_list);
+       struct lowpan_dev_record *entry, *tmp;
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto out;
+
+       if (event == NETDEV_UNREGISTER) {
+               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
+                               lowpan_dellink(entry->ldev, &del_list);
+               }
+
+               unregister_netdevice_many(&del_list);
+       }
+
+out:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block lowpan_dev_notifier = {
+       .notifier_call = lowpan_device_event,
+};
+
 static struct packet_type lowpan_packet_type = {
        .type = __constant_htons(ETH_P_IEEE802154),
        .func = lowpan_rcv,
@@ -1251,6 +1280,12 @@ static int __init lowpan_init_module(void)
                goto out;
 
        dev_add_pack(&lowpan_packet_type);
+
+       err = register_netdevice_notifier(&lowpan_dev_notifier);
+       if (err < 0) {
+               dev_remove_pack(&lowpan_packet_type);
+               lowpan_netlink_fini();
+       }
 out:
        return err;
 }
@@ -1263,6 +1298,8 @@ static void __exit lowpan_cleanup_module(void)
 
        dev_remove_pack(&lowpan_packet_type);
 
+       unregister_netdevice_notifier(&lowpan_dev_notifier);
+
        /* Now 6lowpan packet_type is removed, so no new fragments are
         * expected on RX, therefore that's the time to clean incomplete
         * fragments.
index 1e9917124e75ccada73d34e3a19578155db4dec9..96bb08abece29408f0d9d65f453ee7f2c0c55c4e 100644 (file)
@@ -246,7 +246,7 @@ nla_put_failure:
 }
 EXPORT_SYMBOL(ieee802154_nl_start_confirm);
 
-static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
+static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        u32 seq, int flags, struct net_device *dev)
 {
        void *hdr;
@@ -534,7 +534,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
        if (!msg)
                goto out_dev;
 
-       rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
+       rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
                        0, dev);
        if (rc < 0)
                goto out_free;
@@ -565,7 +565,7 @@ static int ieee802154_dump_iface(struct sk_buff *skb,
                if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
                        goto cont;
 
-               if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
+               if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
                        cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
                        break;
 cont:
index d54be34cca9442b2cb1624ad167e205bea718832..22b1a7058fd3f841d94ee27655840c1ae325f011 100644 (file)
@@ -35,7 +35,7 @@
 
 #include "ieee802154.h"
 
-static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
+static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
        u32 seq, int flags, struct wpan_phy *phy)
 {
        void *hdr;
@@ -105,7 +105,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
        if (!msg)
                goto out_dev;
 
-       rc = ieee802154_nl_fill_phy(msg, info->snd_pid, info->snd_seq,
+       rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
                        0, phy);
        if (rc < 0)
                goto out_free;
@@ -138,7 +138,7 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
                return 0;
 
        rc = ieee802154_nl_fill_phy(data->skb,
-                       NETLINK_CB(data->cb->skb).pid,
+                       NETLINK_CB(data->cb->skb).portid,
                        data->cb->nlh->nlmsg_seq,
                        NLM_F_MULTI,
                        phy);
index fe4582ca969a4ff85c862f8fd96d3b6ec3d03a4d..766c596585631e1001bdd64c80639554c6ab7831 100644 (file)
@@ -212,6 +212,26 @@ int inet_listen(struct socket *sock, int backlog)
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != TCP_LISTEN) {
+               /* Check special setups for testing purpose to enable TFO w/o
+                * requiring TCP_FASTOPEN sockopt.
+                * Note that only TCP sockets (SOCK_STREAM) will reach here.
+                * Also fastopenq may already been allocated because this
+                * socket was in TCP_LISTEN state previously but was
+                * shutdown() (rather than close()).
+                */
+               if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+                   inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+                       if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
+                               err = fastopen_init_queue(sk, backlog);
+                       else if ((sysctl_tcp_fastopen &
+                                 TFO_SERVER_WO_SOCKOPT2) != 0)
+                               err = fastopen_init_queue(sk,
+                                   ((uint)sysctl_tcp_fastopen) >> 16);
+                       else
+                               err = 0;
+                       if (err)
+                               goto out;
+               }
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
@@ -701,7 +721,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
        sock_rps_record_flow(sk2);
        WARN_ON(!((1 << sk2->sk_state) &
-                 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+                 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+                 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
        sock_graft(sk2, newsock);
 
@@ -1364,7 +1385,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
-       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+       if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
        id = ntohl(*(__be32 *)&iph->id);
@@ -1380,7 +1401,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                iph2 = ip_hdr(p);
 
                if ((iph->protocol ^ iph2->protocol) |
-                   (iph->tos ^ iph2->tos) |
                    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
                    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
@@ -1390,6 +1410,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                /* All fields must match except length and checksum. */
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
+                       (iph->tos ^ iph2->tos) |
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
                NAPI_GRO_CB(p)->flush |= flush;
index e12fad773852b5271afafa414ffc1f2706ff4428..2a6abc163ed2fa9cc15d4be2f4e9e795ec9f08b5 100644 (file)
@@ -94,25 +94,22 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
        [IFA_LABEL]             = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
-/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
- * value.  So if you change this define, make appropriate changes to
- * inet_addr_hash as well.
- */
-#define IN4_ADDR_HSIZE 256
+#define IN4_ADDR_HSIZE_SHIFT   8
+#define IN4_ADDR_HSIZE         (1U << IN4_ADDR_HSIZE_SHIFT)
+
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
-static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
-       u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+       u32 val = (__force u32) addr ^ net_hash_mix(net);
 
-       return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
-               (IN4_ADDR_HSIZE - 1));
+       return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 }
 
 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
-       unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+       u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
        spin_lock(&inet_addr_hash_lock);
        hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -136,18 +133,18 @@ static void inet_hash_remove(struct in_ifaddr *ifa)
  */
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
-       unsigned int hash = inet_addr_hash(net, addr);
+       u32 hash = inet_addr_hash(net, addr);
        struct net_device *result = NULL;
        struct in_ifaddr *ifa;
        struct hlist_node *node;
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
-               struct net_device *dev = ifa->ifa_dev->dev;
-
-               if (!net_eq(dev_net(dev), net))
-                       continue;
                if (ifa->ifa_local == addr) {
+                       struct net_device *dev = ifa->ifa_dev->dev;
+
+                       if (!net_eq(dev_net(dev), net))
+                               continue;
                        result = dev;
                        break;
                }
@@ -182,10 +179,10 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 static void devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static inline void devinet_sysctl_register(struct in_device *idev)
+static void devinet_sysctl_register(struct in_device *idev)
 {
 }
-static inline void devinet_sysctl_unregister(struct in_device *idev)
+static void devinet_sysctl_unregister(struct in_device *idev)
 {
 }
 #endif
@@ -205,7 +202,7 @@ static void inet_rcu_free_ifa(struct rcu_head *head)
        kfree(ifa);
 }
 
-static inline void inet_free_ifa(struct in_ifaddr *ifa)
+static void inet_free_ifa(struct in_ifaddr *ifa)
 {
        call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 }
@@ -314,7 +311,7 @@ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 }
 
 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
-                        int destroy, struct nlmsghdr *nlh, u32 pid)
+                        int destroy, struct nlmsghdr *nlh, u32 portid)
 {
        struct in_ifaddr *promote = NULL;
        struct in_ifaddr *ifa, *ifa1 = *ifap;
@@ -348,7 +345,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                                inet_hash_remove(ifa);
                                *ifap1 = ifa->ifa_next;
 
-                               rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
+                               rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
                                blocking_notifier_call_chain(&inetaddr_chain,
                                                NETDEV_DOWN, ifa);
                                inet_free_ifa(ifa);
@@ -385,7 +382,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
           is valid, it will try to restore deleted routes... Grr.
           So that, this order is correct.
         */
-       rtmsg_ifa(RTM_DELADDR, ifa1, nlh, pid);
+       rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
        blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 
        if (promote) {
@@ -398,7 +395,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                }
 
                promote->ifa_flags &= ~IFA_F_SECONDARY;
-               rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid);
+               rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
                blocking_notifier_call_chain(&inetaddr_chain,
                                NETDEV_UP, promote);
                for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
@@ -420,7 +417,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 }
 
 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
-                            u32 pid)
+                            u32 portid)
 {
        struct in_device *in_dev = ifa->ifa_dev;
        struct in_ifaddr *ifa1, **ifap, **last_primary;
@@ -467,7 +464,7 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        /* Send message first, then call notifier.
           Notifier will trigger FIB update, so that
           listeners of netlink will know about new ifaddr */
-       rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid);
+       rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
        blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 
        return 0;
@@ -566,7 +563,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
                    !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
                        continue;
 
-               __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid);
+               __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
                return 0;
        }
 
@@ -652,14 +649,14 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
        if (IS_ERR(ifa))
                return PTR_ERR(ifa);
 
-       return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).pid);
+       return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
 }
 
 /*
  *     Determine a default network mask, based on the IP address.
  */
 
-static inline int inet_abc_len(__be32 addr)
+static int inet_abc_len(__be32 addr)
 {
        int rc = -1;    /* Something else, probably a multicast. */
 
@@ -1124,7 +1121,7 @@ skip:
        }
 }
 
-static inline bool inetdev_valid_mtu(unsigned int mtu)
+static bool inetdev_valid_mtu(unsigned int mtu)
 {
        return mtu >= 68;
 }
@@ -1239,7 +1236,7 @@ static struct notifier_block ip_netdev_notifier = {
        .notifier_call = inetdev_event,
 };
 
-static inline size_t inet_nlmsg_size(void)
+static size_t inet_nlmsg_size(void)
 {
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
               + nla_total_size(4) /* IFA_ADDRESS */
@@ -1249,12 +1246,12 @@ static inline size_t inet_nlmsg_size(void)
 }
 
 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
-                           u32 pid, u32 seq, int event, unsigned int flags)
+                           u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct ifaddrmsg *ifm;
        struct nlmsghdr  *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -1316,7 +1313,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                                if (ip_idx < s_ip_idx)
                                        continue;
                                if (inet_fill_ifaddr(skb, ifa,
-                                            NETLINK_CB(cb->skb).pid,
+                                            NETLINK_CB(cb->skb).portid,
                                             cb->nlh->nlmsg_seq,
                                             RTM_NEWADDR, NLM_F_MULTI) <= 0) {
                                        rcu_read_unlock();
@@ -1338,7 +1335,7 @@ done:
 }
 
 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
-                     u32 pid)
+                     u32 portid)
 {
        struct sk_buff *skb;
        u32 seq = nlh ? nlh->nlmsg_seq : 0;
@@ -1350,14 +1347,14 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        if (skb == NULL)
                goto errout;
 
-       err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
+       err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
+       rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
        return;
 errout:
        if (err < 0)
index 8e2b475da9faca9f7e27482f7e416908e7cf66fa..68c93d1bb03adb9fef46ff264a783dad82a962ea 100644 (file)
@@ -218,7 +218,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
        scope = RT_SCOPE_UNIVERSE;
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
                fl4.flowi4_oif = 0;
-               fl4.flowi4_iif = net->loopback_dev->ifindex;
+               fl4.flowi4_iif = LOOPBACK_IFINDEX;
                fl4.daddr = ip_hdr(skb)->saddr;
                fl4.saddr = 0;
                fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
@@ -557,7 +557,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
        cfg->fc_flags = rtm->rtm_flags;
        cfg->fc_nlflags = nlh->nlmsg_flags;
 
-       cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
+       cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
        cfg->fc_nlinfo.nlh = nlh;
        cfg->fc_nlinfo.nl_net = net;
 
@@ -955,7 +955,7 @@ static void nl_fib_input(struct sk_buff *skb)
        struct fib_result_nl *frn;
        struct nlmsghdr *nlh;
        struct fib_table *tb;
-       u32 pid;
+       u32 portid;
 
        net = sock_net(skb->sk);
        nlh = nlmsg_hdr(skb);
@@ -973,10 +973,10 @@ static void nl_fib_input(struct sk_buff *skb)
 
        nl_fib_lookup(frn, tb);
 
-       pid = NETLINK_CB(skb).pid;      /* pid of sending process */
-       NETLINK_CB(skb).pid = 0;        /* from kernel */
+       portid = NETLINK_CB(skb).portid;      /* pid of sending process */
+       NETLINK_CB(skb).portid = 0;        /* from kernel */
        NETLINK_CB(skb).dst_group = 0;  /* unicast */
-       netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
+       netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
 }
 
 static int __net_init nl_fib_lookup_init(struct net *net)
@@ -986,7 +986,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
                .input  = nl_fib_input,
        };
 
-       sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
+       sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
        if (sk == NULL)
                return -EAFNOSUPPORT;
        net->ipv4.fibnl = sk;
@@ -1041,7 +1041,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
-       struct in_device *in_dev = __in_dev_get_rtnl(dev);
+       struct in_device *in_dev;
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_UNREGISTER) {
@@ -1050,8 +1050,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                return NOTIFY_DONE;
        }
 
-       if (!in_dev)
-               return NOTIFY_DONE;
+       in_dev = __in_dev_get_rtnl(dev);
 
        switch (event) {
        case NETDEV_UP:
@@ -1062,16 +1061,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                fib_sync_up(dev);
 #endif
                atomic_inc(&net->ipv4.dev_addr_genid);
-               rt_cache_flush(dev_net(dev));
+               rt_cache_flush(net);
                break;
        case NETDEV_DOWN:
                fib_disable_ip(dev, 0);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGE:
-               rt_cache_flush(dev_net(dev));
-               break;
-       case NETDEV_UNREGISTER_BATCH:
+               rt_cache_flush(net);
                break;
        }
        return NOTIFY_DONE;
index da80dc14cc76f51cb79e5c8042173396c036051e..3509065e409ab2782fe23cc8a174e369f0da501d 100644 (file)
@@ -391,7 +391,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
        if (skb == NULL)
                goto errout;
 
-       err = fib_dump_info(skb, info->pid, seq, event, tb_id,
+       err = fib_dump_info(skb, info->portid, seq, event, tb_id,
                            fa->fa_type, key, dst_len,
                            fa->fa_tos, fa->fa_info, nlm_flags);
        if (err < 0) {
@@ -400,7 +400,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
+       rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
                    info->nlh, GFP_KERNEL);
        return;
 errout:
@@ -989,14 +989,14 @@ failure:
        return ERR_PTR(err);
 }
 
-int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
+int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
 {
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
index d1b93595b4a7dce1e7701bb713bd5f0bc5adf1f6..31d771ca9a709f71328c1734433cecba1fde40fd 100644 (file)
@@ -1550,7 +1550,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                 * state.directly.
                 */
                if (pref_mismatch) {
-                       int mp = KEYLENGTH - fls(pref_mismatch);
+                       /* fls(x) = __fls(x) + 1 */
+                       int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
 
                        if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
                                goto backtrace;
@@ -1655,7 +1656,12 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        if (!l)
                return -ESRCH;
 
-       fa_head = get_fa_head(l, plen);
+       li = find_leaf_info(l, plen);
+
+       if (!li)
+               return -ESRCH;
+
+       fa_head = &li->falh;
        fa = fib_find_alias(fa_head, tos, 0);
 
        if (!fa)
@@ -1691,9 +1697,6 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
 
-       l = fib_find_node(t, key);
-       li = find_leaf_info(l, plen);
-
        list_del_rcu(&fa->fa_list);
 
        if (!plen)
@@ -1870,7 +1873,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
                        continue;
                }
 
-               if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
+               if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq,
                                  RTM_NEWROUTE,
                                  tb->tb_id,
index 6699f23e6f55b0012cc50b74030f415362efbed1..736ab70fd17981c95085ee0fad163aaeca39133c 100644 (file)
@@ -815,14 +815,15 @@ static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
        return 1;
 }
 
-static void igmp_heard_report(struct in_device *in_dev, __be32 group)
+/* return true if packet was dropped */
+static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
 {
        struct ip_mc_list *im;
 
        /* Timers are only set for non-local groups */
 
        if (group == IGMP_ALL_HOSTS)
-               return;
+               return false;
 
        rcu_read_lock();
        for_each_pmc_rcu(in_dev, im) {
@@ -832,9 +833,11 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
                }
        }
        rcu_read_unlock();
+       return false;
 }
 
-static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
+/* return true if packet was dropped */
+static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
        int len)
 {
        struct igmphdr          *ih = igmp_hdr(skb);
@@ -866,7 +869,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                /* clear deleted report items */
                igmpv3_clear_delrec(in_dev);
        } else if (len < 12) {
-               return; /* ignore bogus packet; freed by caller */
+               return true;    /* ignore bogus packet; freed by caller */
        } else if (IGMP_V1_SEEN(in_dev)) {
                /* This is a v3 query with v1 queriers present */
                max_delay = IGMP_Query_Response_Interval;
@@ -883,13 +886,13 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        max_delay = 1;  /* can't mod w/ 0 */
        } else { /* v3 */
                if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
-                       return;
+                       return true;
 
                ih3 = igmpv3_query_hdr(skb);
                if (ih3->nsrcs) {
                        if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
                                           + ntohs(ih3->nsrcs)*sizeof(__be32)))
-                               return;
+                               return true;
                        ih3 = igmpv3_query_hdr(skb);
                }
 
@@ -901,9 +904,9 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        in_dev->mr_qrv = ih3->qrv;
                if (!group) { /* general query */
                        if (ih3->nsrcs)
-                               return; /* no sources allowed */
+                               return false;   /* no sources allowed */
                        igmp_gq_start_timer(in_dev);
-                       return;
+                       return false;
                }
                /* mark sources to include, if group & source-specific */
                mark = ih3->nsrcs != 0;
@@ -939,6 +942,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        igmp_mod_timer(im, max_delay);
        }
        rcu_read_unlock();
+       return false;
 }
 
 /* called in rcu_read_lock() section */
@@ -948,6 +952,7 @@ int igmp_rcv(struct sk_buff *skb)
        struct igmphdr *ih;
        struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
        int len = skb->len;
+       bool dropped = true;
 
        if (in_dev == NULL)
                goto drop;
@@ -969,7 +974,7 @@ int igmp_rcv(struct sk_buff *skb)
        ih = igmp_hdr(skb);
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_QUERY:
-               igmp_heard_query(in_dev, skb, len);
+               dropped = igmp_heard_query(in_dev, skb, len);
                break;
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
@@ -979,7 +984,7 @@ int igmp_rcv(struct sk_buff *skb)
                /* don't rely on MC router hearing unicast reports */
                if (skb->pkt_type == PACKET_MULTICAST ||
                    skb->pkt_type == PACKET_BROADCAST)
-                       igmp_heard_report(in_dev, ih->group);
+                       dropped = igmp_heard_report(in_dev, ih->group);
                break;
        case IGMP_PIM:
 #ifdef CONFIG_IP_PIMSM_V1
@@ -997,7 +1002,10 @@ int igmp_rcv(struct sk_buff *skb)
        }
 
 drop:
-       kfree_skb(skb);
+       if (dropped)
+               kfree_skb(skb);
+       else
+               consume_skb(skb);
        return 0;
 }
 
@@ -1896,6 +1904,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        rtnl_unlock();
        return ret;
 }
+EXPORT_SYMBOL(ip_mc_leave_group);
 
 int ip_mc_source(int add, int omode, struct sock *sk, struct
        ip_mreq_source *mreqs, int ifindex)
@@ -2435,6 +2444,8 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                struct ip_mc_list *im = (struct ip_mc_list *)v;
                struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
                char   *querier;
+               long delta;
+
 #ifdef CONFIG_IP_MULTICAST
                querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
                          IGMP_V2_SEEN(state->in_dev) ? "V2" :
@@ -2448,11 +2459,12 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                                   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
                }
 
+               delta = im->timer.expires - jiffies;
                seq_printf(seq,
                           "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
                           im->multiaddr, im->users,
-                          im->tm_running, im->tm_running ?
-                          jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
+                          im->tm_running,
+                          im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
                           im->reporter);
        }
        return 0;
index 7f75f21d7b8346e0279364c511117582f3c1f342..f0c5b9c1a95714e2e206cf6cd178a90626271fcc 100644 (file)
@@ -283,7 +283,9 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct sock *newsk;
+       struct request_sock *req;
        int error;
 
        lock_sock(sk);
@@ -296,7 +298,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
                goto out_err;
 
        /* Find already established connection */
-       if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
+       if (reqsk_queue_empty(queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
                /* If this is a non blocking socket don't sleep */
@@ -308,14 +310,32 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
                if (error)
                        goto out_err;
        }
-
-       newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-       WARN_ON(newsk->sk_state == TCP_SYN_RECV);
+       req = reqsk_queue_remove(queue);
+       newsk = req->sk;
+
+       sk_acceptq_removed(sk);
+       if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
+               spin_lock_bh(&queue->fastopenq->lock);
+               if (tcp_rsk(req)->listener) {
+                       /* We are still waiting for the final ACK from 3WHS
+                        * so can't free req now. Instead, we set req->sk to
+                        * NULL to signify that the child socket is taken
+                        * so reqsk_fastopen_remove() will free the req
+                        * when 3WHS finishes (or is aborted).
+                        */
+                       req->sk = NULL;
+                       req = NULL;
+               }
+               spin_unlock_bh(&queue->fastopenq->lock);
+       }
 out:
        release_sock(sk);
+       if (req)
+               __reqsk_free(req);
        return newsk;
 out_err:
        newsk = NULL;
+       req = NULL;
        *err = error;
        goto out;
 }
@@ -720,13 +740,14 @@ EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 void inet_csk_listen_stop(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *acc_req;
        struct request_sock *req;
 
        inet_csk_delete_keepalive_timer(sk);
 
        /* make all the listen_opt local to us */
-       acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
+       acc_req = reqsk_queue_yank_acceptq(queue);
 
        /* Following specs, it would be better either to send FIN
         * (and enter FIN-WAIT-1, it is normal close)
@@ -736,7 +757,7 @@ void inet_csk_listen_stop(struct sock *sk)
         * To be honest, we are not able to make either
         * of the variants now.                 --ANK
         */
-       reqsk_queue_destroy(&icsk->icsk_accept_queue);
+       reqsk_queue_destroy(queue);
 
        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;
@@ -754,6 +775,19 @@ void inet_csk_listen_stop(struct sock *sk)
 
                percpu_counter_inc(sk->sk_prot->orphan_count);
 
+               if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
+                       BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+                       BUG_ON(sk != tcp_rsk(req)->listener);
+
+                       /* Paranoid, to prevent race condition if
+                        * an inbound pkt destined for child is
+                        * blocked by sock lock in tcp_v4_rcv().
+                        * Also to satisfy an assertion in
+                        * tcp_v4_destroy_sock().
+                        */
+                       tcp_sk(child)->fastopen_rsk = NULL;
+                       sock_put(sk);
+               }
                inet_csk_destroy_sock(child);
 
                bh_unlock_sock(child);
@@ -763,6 +797,17 @@ void inet_csk_listen_stop(struct sock *sk)
                sk_acceptq_removed(sk);
                __reqsk_free(req);
        }
+       if (queue->fastopenq != NULL) {
+               /* Free all the reqs queued in rskq_rst_head. */
+               spin_lock_bh(&queue->fastopenq->lock);
+               acc_req = queue->fastopenq->rskq_rst_head;
+               queue->fastopenq->rskq_rst_head = NULL;
+               spin_unlock_bh(&queue->fastopenq->lock);
+               while ((req = acc_req) != NULL) {
+                       acc_req = req->dl_next;
+                       __reqsk_free(req);
+               }
+       }
        WARN_ON(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
index 8bc005b1435f5109269a6057ad96d7c9985574d5..535584c00f9118fe33a17e79b858e66935f424f9 100644 (file)
@@ -70,7 +70,7 @@ static inline void inet_diag_unlock_handler(
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,                   
-                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_sock *inet = inet_sk(sk);
@@ -84,7 +84,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        handler = inet_diag_table[req->sdiag_protocol];
        BUG_ON(handler == NULL);
 
-       nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -201,23 +201,23 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
 static int inet_csk_diag_fill(struct sock *sk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,
-                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        return inet_sk_diag_fill(sk, inet_csk(sk),
-                       skb, req, user_ns, pid, seq, nlmsg_flags, unlh);
+                       skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
 }
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               struct sk_buff *skb, struct inet_diag_req_v2 *req,
-                              u32 pid, u32 seq, u16 nlmsg_flags,
+                              u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
        long tmo;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -260,14 +260,14 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        struct inet_diag_req_v2 *r,
                        struct user_namespace *user_ns,
-                       u32 pid, u32 seq, u16 nlmsg_flags,
+                       u32 portid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, r, pid, seq, nlmsg_flags,
+                                          skb, r, portid, seq, nlmsg_flags,
                                           unlh);
-       return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh);
+       return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
 }
 
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -316,14 +316,14 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
 
        err = sk_diag_fill(sk, rep, req,
                           sk_user_ns(NETLINK_CB(in_skb).ssk),
-                          NETLINK_CB(in_skb).pid,
+                          NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                nlmsg_free(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
@@ -557,7 +557,7 @@ static int inet_csk_diag_dump(struct sock *sk,
 
        return inet_csk_diag_fill(sk, skb, r,
                                  sk_user_ns(NETLINK_CB(cb->skb).ssk),
-                                 NETLINK_CB(cb->skb).pid,
+                                 NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
@@ -592,14 +592,14 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
        }
 
        return inet_twsk_diag_fill(tw, skb, r,
-                                  NETLINK_CB(cb->skb).pid,
+                                  NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
                              struct request_sock *req,
                              struct user_namespace *user_ns,
-                             u32 pid, u32 seq,
+                             u32 portid, u32 seq,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
@@ -608,7 +608,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        struct nlmsghdr *nlh;
        long tmo;
 
-       nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;
@@ -711,7 +711,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                        err = inet_diag_fill_req(skb, sk, req,
                                               sk_user_ns(NETLINK_CB(cb->skb).ssk),
-                                              NETLINK_CB(cb->skb).pid,
+                                              NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq, cb->nlh);
                        if (err < 0) {
                                cb->args[3] = j + 1;
index 85190e69297bfd736df428226c3e01aeb5be3aa8..4750d2b74d79324cdc3176b7a9cbbe0d13c4e9c7 100644 (file)
@@ -89,7 +89,7 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
        nf->low_thresh = 0;
 
        local_bh_disable();
-       inet_frag_evictor(nf, f);
+       inet_frag_evictor(nf, f, true);
        local_bh_enable();
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
@@ -158,11 +158,16 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
 {
        struct inet_frag_queue *q;
        int work, evicted = 0;
 
+       if (!force) {
+               if (atomic_read(&nf->mem) <= nf->high_thresh)
+                       return 0;
+       }
+
        work = atomic_read(&nf->mem) - nf->low_thresh;
        while (work > 0) {
                read_lock(&f->lock);
index 8d07c973409ca3df9d09f8fb6b3a614d304603a8..448e68546827431098c980bafc4a63967764942f 100644 (file)
@@ -219,7 +219,7 @@ static void ip_evictor(struct net *net)
 {
        int evicted;
 
-       evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
+       evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
        if (evicted)
                IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
 }
@@ -523,6 +523,10 @@ found:
        if (offset == 0)
                qp->q.last_in |= INET_FRAG_FIRST_IN;
 
+       if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
+           skb->len + ihl > qp->q.max_size)
+               qp->q.max_size = skb->len + ihl;
+
        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);
@@ -646,9 +650,11 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;
+       IPCB(head)->frag_max_size = qp->q.max_size;
 
        iph = ip_hdr(head);
-       iph->frag_off = 0;
+       /* max_size != 0 implies at least one fragment had IP_DF set */
+       iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
        iph->tot_len = htons(len);
        iph->tos |= ecn;
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
@@ -678,8 +684,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Start by cleaning up the memory. */
-       if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
-               ip_evictor(net);
+       ip_evictor(net);
 
        /* Lookup (or create) queue header */
        if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
index b062a98574f2e40e63d5b5eee8f1d7f2fda28425..7240f8e2dd4511dde4de0bd08290bb718eab140f 100644 (file)
    Alexey Kuznetsov.
  */
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 static int ipgre_tunnel_init(struct net_device *dev);
 static void ipgre_tunnel_setup(struct net_device *dev);
@@ -204,7 +208,9 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->rx_errors = dev->stats.rx_errors;
+
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
@@ -214,11 +220,25 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
        return tot;
 }
 
+/* Does key in tunnel parameters match packet */
+static bool ipgre_key_match(const struct ip_tunnel_parm *p,
+                           __be16 flags, __be32 key)
+{
+       if (p->i_flags & GRE_KEY) {
+               if (flags & GRE_KEY)
+                       return key == p->i_key;
+               else
+                       return false;   /* key expected, none present */
+       } else
+               return !(flags & GRE_KEY);
+}
+
 /* Given src, dst and key, find appropriate for input tunnel. */
 
 static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
                                             __be32 remote, __be32 local,
-                                            __be32 key, __be16 gre_proto)
+                                            __be16 flags, __be32 key,
+                                            __be16 gre_proto)
 {
        struct net *net = dev_net(dev);
        int link = dev->ifindex;
@@ -233,10 +253,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
        for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
-                   key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
+               if (!ipgre_key_match(&t->parms, flags, key))
+                       continue;
+
                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;
@@ -257,10 +279,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
 
        for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
                if (remote != t->parms.iph.daddr ||
-                   key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
+               if (!ipgre_key_match(&t->parms, flags, key))
+                       continue;
+
                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;
@@ -283,10 +307,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
                if ((local != t->parms.iph.saddr &&
                     (local != t->parms.iph.daddr ||
                      !ipv4_is_multicast(local))) ||
-                   key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
+               if (!ipgre_key_match(&t->parms, flags, key))
+                       continue;
+
                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;
@@ -489,6 +515,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        __be16 flags;
+       __be32 key = 0;
 
        flags = p[0];
        if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
@@ -505,6 +532,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
        if (skb_headlen(skb) < grehlen)
                return;
 
+       if (flags & GRE_KEY)
+               key = *(((__be32 *)p) + (grehlen / 4) - 1);
+
        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
@@ -533,49 +563,34 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
                break;
        }
 
-       rcu_read_lock();
        t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                               flags, key, p[1]);
+
        if (t == NULL)
-               goto out;
+               return;
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 t->parms.link, 0, IPPROTO_GRE, 0);
-               goto out;
+               return;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_GRE, 0);
-               goto out;
+               return;
        }
        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
-               goto out;
+               return;
 
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               goto out;
+               return;
 
        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
-out:
-       rcu_read_unlock();
-}
-
-static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
-{
-       if (INET_ECN_is_ce(iph->tos)) {
-               if (skb->protocol == htons(ETH_P_IP)) {
-                       IP_ECN_set_ce(ip_hdr(skb));
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       IP6_ECN_set_ce(ipv6_hdr(skb));
-               }
-       }
 }
 
 static inline u8
@@ -600,9 +615,10 @@ static int ipgre_rcv(struct sk_buff *skb)
        struct ip_tunnel *tunnel;
        int    offset = 4;
        __be16 gre_proto;
+       int    err;
 
        if (!pskb_may_pull(skb, 16))
-               goto drop_nolock;
+               goto drop;
 
        iph = ip_hdr(skb);
        h = skb->data;
@@ -613,7 +629,7 @@ static int ipgre_rcv(struct sk_buff *skb)
                   - We do not support routing headers.
                 */
                if (flags&(GRE_VERSION|GRE_ROUTING))
-                       goto drop_nolock;
+                       goto drop;
 
                if (flags&GRE_CSUM) {
                        switch (skb->ip_summed) {
@@ -641,10 +657,10 @@ static int ipgre_rcv(struct sk_buff *skb)
 
        gre_proto = *(__be16 *)(h + 2);
 
-       rcu_read_lock();
-       if ((tunnel = ipgre_tunnel_lookup(skb->dev,
-                                         iph->saddr, iph->daddr, key,
-                                         gre_proto))) {
+       tunnel = ipgre_tunnel_lookup(skb->dev,
+                                    iph->saddr, iph->daddr, flags, key,
+                                    gre_proto);
+       if (tunnel) {
                struct pcpu_tstats *tstats;
 
                secpath_reset(skb);
@@ -703,27 +719,33 @@ static int ipgre_rcv(struct sk_buff *skb)
                        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                }
 
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               skb_reset_network_header(skb);
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &iph->saddr, iph->tos);
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto drop;
+                       }
+               }
+
                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               __skb_tunnel_rx(skb, tunnel->dev);
-
-               skb_reset_network_header(skb);
-               ipgre_ecn_decapsulate(iph, skb);
-
-               netif_rx(skb);
-
-               rcu_read_unlock();
+               gro_cells_receive(&tunnel->gro_cells, skb);
                return 0;
        }
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 drop:
-       rcu_read_unlock();
-drop_nolock:
        kfree_skb(skb);
        return 0;
 }
@@ -745,6 +767,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        __be32 dst;
        int    mtu;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           skb_checksum_help(skb))
+               goto tx_error;
+
        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;
 
@@ -1292,10 +1318,18 @@ static const struct net_device_ops ipgre_netdev_ops = {
 
 static void ipgre_dev_free(struct net_device *dev)
 {
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       gro_cells_destroy(&tunnel->gro_cells);
        free_percpu(dev->tstats);
        free_netdev(dev);
 }
 
+#define GRE_FEATURES (NETIF_F_SG |             \
+                     NETIF_F_FRAGLIST |        \
+                     NETIF_F_HIGHDMA |         \
+                     NETIF_F_HW_CSUM)
+
 static void ipgre_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipgre_netdev_ops;
@@ -1309,12 +1343,16 @@ static void ipgre_tunnel_setup(struct net_device *dev)
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
+
+       dev->features           |= GRE_FEATURES;
+       dev->hw_features        |= GRE_FEATURES;
 }
 
 static int ipgre_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel;
        struct iphdr *iph;
+       int err;
 
        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;
@@ -1341,6 +1379,12 @@ static int ipgre_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       err = gro_cells_init(&tunnel->gro_cells, dev);
+       if (err) {
+               free_percpu(dev->tstats);
+               return err;
+       }
+
        return 0;
 }
 
index c196d749daf23b3823ffe012495ea5d9411be99a..24a29a39e9a885dfa96300067fd37154b0875f53 100644 (file)
@@ -467,7 +467,9 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
        iph = ip_hdr(skb);
 
-       if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
+       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
+                    (IPCB(skb)->frag_max_size &&
+                     IPCB(skb)->frag_max_size > dst_mtu(&rt->dst)))) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
@@ -791,6 +793,7 @@ static int __ip_append_data(struct sock *sk,
                            struct flowi4 *fl4,
                            struct sk_buff_head *queue,
                            struct inet_cork *cork,
+                           struct page_frag *pfrag,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
@@ -985,47 +988,30 @@ alloc_new_skb:
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
-                       struct page *page = cork->page;
-                       int off = cork->off;
-                       unsigned int left;
-
-                       if (page && (left = PAGE_SIZE - off) > 0) {
-                               if (copy >= left)
-                                       copy = left;
-                               if (page != skb_frag_page(frag)) {
-                                       if (i == MAX_SKB_FRAGS) {
-                                               err = -EMSGSIZE;
-                                               goto error;
-                                       }
-                                       skb_fill_page_desc(skb, i, page, off, 0);
-                                       skb_frag_ref(skb, i);
-                                       frag = &skb_shinfo(skb)->frags[i];
-                               }
-                       } else if (i < MAX_SKB_FRAGS) {
-                               if (copy > PAGE_SIZE)
-                                       copy = PAGE_SIZE;
-                               page = alloc_pages(sk->sk_allocation, 0);
-                               if (page == NULL)  {
-                                       err = -ENOMEM;
-                                       goto error;
-                               }
-                               cork->page = page;
-                               cork->off = 0;
 
-                               skb_fill_page_desc(skb, i, page, 0, 0);
-                               frag = &skb_shinfo(skb)->frags[i];
-                       } else {
-                               err = -EMSGSIZE;
-                               goto error;
-                       }
-                       if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
-                                   offset, copy, skb->len, skb) < 0) {
-                               err = -EFAULT;
+                       err = -ENOMEM;
+                       if (!sk_page_frag_refill(sk, pfrag))
                                goto error;
+
+                       if (!skb_can_coalesce(skb, i, pfrag->page,
+                                             pfrag->offset)) {
+                               err = -EMSGSIZE;
+                               if (i == MAX_SKB_FRAGS)
+                                       goto error;
+
+                               __skb_fill_page_desc(skb, i, pfrag->page,
+                                                    pfrag->offset, 0);
+                               skb_shinfo(skb)->nr_frags = ++i;
+                               get_page(pfrag->page);
                        }
-                       cork->off += copy;
-                       skb_frag_size_add(frag, copy);
+                       copy = min_t(int, copy, pfrag->size - pfrag->offset);
+                       if (getfrag(from,
+                                   page_address(pfrag->page) + pfrag->offset,
+                                   offset, copy, skb->len, skb) < 0)
+                               goto error_efault;
+
+                       pfrag->offset += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
@@ -1037,6 +1023,8 @@ alloc_new_skb:
 
        return 0;
 
+error_efault:
+       err = -EFAULT;
 error:
        cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
@@ -1077,8 +1065,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->tx_flags = ipc->tx_flags;
-       cork->page = NULL;
-       cork->off = 0;
 
        return 0;
 }
@@ -1115,7 +1101,8 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                transhdrlen = 0;
        }
 
-       return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
+       return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
+                               sk_page_frag(sk), getfrag,
                                from, length, transhdrlen, flags);
 }
 
@@ -1437,7 +1424,8 @@ struct sk_buff *ip_make_skb(struct sock *sk,
        if (err)
                return ERR_PTR(err);
 
-       err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
+       err = __ip_append_data(sk, fl4, &queue, &cork,
+                              &current->task_frag, getfrag,
                               from, length, transhdrlen, flags);
        if (err) {
                __ip_flush_pending_frames(sk, &queue, &cork);
index 3511ffba7bd41088cd95d677f5c43dab947d9bf2..978bca4818aef0c52dda4011c2a98b7ef0326559 100644 (file)
@@ -304,7 +304,6 @@ static int vti_err(struct sk_buff *skb, u32 info)
 
        err = -ENOENT;
 
-       rcu_read_lock();
        t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL)
                goto out;
@@ -326,7 +325,6 @@ static int vti_err(struct sk_buff *skb, u32 info)
                t->err_count = 1;
        t->err_time = jiffies;
 out:
-       rcu_read_unlock();
        return err;
 }
 
@@ -336,7 +334,6 @@ static int vti_rcv(struct sk_buff *skb)
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
 
-       rcu_read_lock();
        tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
@@ -348,10 +345,8 @@ static int vti_rcv(struct sk_buff *skb)
                u64_stats_update_end(&tstats->syncp);
 
                skb->dev = tunnel->dev;
-               rcu_read_unlock();
                return 1;
        }
-       rcu_read_unlock();
 
        return -1;
 }
index 67e8a6b086ea7a0d2c4cc986ed6ed0e6b4414c6a..798358b107171664823de1b13cc3ffd020dc7f94 100644 (file)
@@ -582,6 +582,17 @@ static void __init ic_rarp_send_if(struct ic_device *d)
 }
 #endif
 
+/*
+ *  Predefine Nameservers
+ */
+static inline void __init ic_nameservers_predef(void)
+{
+       int i;
+
+       for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
+               ic_nameservers[i] = NONE;
+}
+
 /*
  *     DHCP/BOOTP support.
  */
@@ -747,10 +758,7 @@ static void __init ic_bootp_init_ext(u8 *e)
  */
 static inline void __init ic_bootp_init(void)
 {
-       int i;
-
-       for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
-               ic_nameservers[i] = NONE;
+       ic_nameservers_predef();
 
        dev_add_pack(&bootp_packet_type);
 }
@@ -1379,6 +1387,7 @@ static int __init ip_auto_config(void)
        int retries = CONF_OPEN_RETRIES;
 #endif
        int err;
+       unsigned int i;
 
 #ifdef CONFIG_PROC_FS
        proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1499,7 +1508,15 @@ static int __init ip_auto_config(void)
                &ic_servaddr, &root_server_addr, root_server_path);
        if (ic_dev_mtu)
                pr_cont(", mtu=%d", ic_dev_mtu);
-       pr_cont("\n");
+       for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
+               if (ic_nameservers[i] != NONE) {
+                       pr_info("     nameserver%u=%pI4",
+                               i, &ic_nameservers[i]);
+                       break;
+               }
+       for (i++; i < CONF_NAMESERVERS_MAX; i++)
+               if (ic_nameservers[i] != NONE)
+                       pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
 #endif /* !SILENT */
 
        return 0;
@@ -1570,6 +1587,8 @@ static int __init ip_auto_config_setup(char *addrs)
                return 1;
        }
 
+       ic_nameservers_predef();
+
        /* Parse string for static IP assignment.  */
        ip = addrs;
        while (ip && *ip) {
@@ -1613,6 +1632,20 @@ static int __init ip_auto_config_setup(char *addrs)
                                        ic_enable = 0;
                                }
                                break;
+                       case 7:
+                               if (CONF_NAMESERVERS_MAX >= 1) {
+                                       ic_nameservers[0] = in_aton(ip);
+                                       if (ic_nameservers[0] == ANY)
+                                               ic_nameservers[0] = NONE;
+                               }
+                               break;
+                       case 8:
+                               if (CONF_NAMESERVERS_MAX >= 2) {
+                                       ic_nameservers[1] = in_aton(ip);
+                                       if (ic_nameservers[1] == ANY)
+                                               ic_nameservers[1] = NONE;
+                               }
+                               break;
                        }
                }
                ip = cp;
index 99af1f0cc65827c3faa40407c17b38527ac8a211..e15b45297c09f0043bda21ad366ce36d5a6e5824 100644 (file)
 #define HASH_SIZE  16
 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static int ipip_net_id __read_mostly;
 struct ipip_net {
        struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
@@ -365,8 +369,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
        }
 
        err = -ENOENT;
-
-       rcu_read_lock();
        t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL)
                goto out;
@@ -398,34 +400,22 @@ static int ipip_err(struct sk_buff *skb, u32 info)
                t->err_count = 1;
        t->err_time = jiffies;
 out:
-       rcu_read_unlock();
-       return err;
-}
-
-static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
-                                       struct sk_buff *skb)
-{
-       struct iphdr *inner_iph = ip_hdr(skb);
 
-       if (INET_ECN_is_ce(outer_iph->tos))
-               IP_ECN_set_ce(inner_iph);
+       return err;
 }
 
 static int ipip_rcv(struct sk_buff *skb)
 {
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
+       int err;
 
-       rcu_read_lock();
        tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
 
-               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                       rcu_read_unlock();
-                       kfree_skb(skb);
-                       return 0;
-               }
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
 
                secpath_reset(skb);
 
@@ -434,24 +424,35 @@ static int ipip_rcv(struct sk_buff *skb)
                skb->protocol = htons(ETH_P_IP);
                skb->pkt_type = PACKET_HOST;
 
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &iph->saddr, iph->tos);
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto drop;
+                       }
+               }
+
                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               __skb_tunnel_rx(skb, tunnel->dev);
-
-               ipip_ecn_decapsulate(iph, skb);
-
                netif_rx(skb);
-
-               rcu_read_unlock();
                return 0;
        }
-       rcu_read_unlock();
 
        return -1;
+
+drop:
+       kfree_skb(skb);
+       return 0;
 }
 
 /*
index ebdf06f938bf040eebc91763c3e952c63d00f92b..1daa95c2a0bad8e532181dc4d67d4aead0f3671f 100644 (file)
@@ -626,7 +626,7 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));
 
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
@@ -870,7 +870,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                                memset(&e->msg, 0, sizeof(e->msg));
                        }
 
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        ip_mr_forward(net, mrt, skb, c, 0);
                }
@@ -1808,7 +1808,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
                .flowi4_oif = (rt_is_output_route(rt) ?
                               skb->dev->ifindex : 0),
                .flowi4_iif = (rt_is_output_route(rt) ?
-                              net->loopback_dev->ifindex :
+                              LOOPBACK_IFINDEX :
                               skb->dev->ifindex),
                .flowi4_mark = skb->mark,
        };
@@ -2117,12 +2117,12 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 }
 
 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-                           u32 pid, u32 seq, struct mfc_cache *c)
+                           u32 portid, u32 seq, struct mfc_cache *c)
 {
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
 
-       nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2176,7 +2176,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
                                if (e < s_e)
                                        goto next_entry;
                                if (ipmr_fill_mroute(mrt, skb,
-                                                    NETLINK_CB(cb->skb).pid,
+                                                    NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
                                                     mfc) < 0)
                                        goto done;
index ed1b3678319223eaa7a4d0b2df3a9aae21deeb8a..4c0cf63dd92e9ab94bf155c8e0d497c2029cebde 100644 (file)
@@ -72,43 +72,6 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
 }
 EXPORT_SYMBOL(ip_route_me_harder);
 
-#ifdef CONFIG_XFRM
-int ip_xfrm_me_harder(struct sk_buff *skb)
-{
-       struct flowi fl;
-       unsigned int hh_len;
-       struct dst_entry *dst;
-
-       if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
-               return 0;
-       if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
-               return -1;
-
-       dst = skb_dst(skb);
-       if (dst->xfrm)
-               dst = ((struct xfrm_dst *)dst)->route;
-       dst_hold(dst);
-
-       dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
-       if (IS_ERR(dst))
-               return -1;
-
-       skb_dst_drop(skb);
-       skb_dst_set(skb, dst);
-
-       /* Change in oif may mean change in hh_len. */
-       hh_len = skb_dst(skb)->dev->hard_header_len;
-       if (skb_headroom(skb) < hh_len &&
-           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
-               return -1;
-       return 0;
-}
-EXPORT_SYMBOL(ip_xfrm_me_harder);
-#endif
-
-void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
-EXPORT_SYMBOL(ip_nat_decode_session);
-
 /*
  * Extra routing may needed on local out, as the QUEUE target never
  * returns control to the table.
@@ -225,12 +188,12 @@ static const struct nf_afinfo nf_ip_afinfo = {
        .route_key_size         = sizeof(struct ip_rt_info),
 };
 
-static int ipv4_netfilter_init(void)
+static int __init ipv4_netfilter_init(void)
 {
        return nf_register_afinfo(&nf_ip_afinfo);
 }
 
-static void ipv4_netfilter_fini(void)
+static void __exit ipv4_netfilter_fini(void)
 {
        nf_unregister_afinfo(&nf_ip_afinfo);
 }
index fcc543cd987a3f22b45ea143c9c754019fa8bacd..d8d6f2a5bf120fe857fce4d3beadfebea9a57e0a 100644 (file)
@@ -143,25 +143,22 @@ config IP_NF_TARGET_ULOG
          To compile it as a module, choose M here.  If unsure, say N.
 
 # NAT + specific targets: nf_conntrack
-config NF_NAT
-       tristate "Full NAT"
+config NF_NAT_IPV4
+       tristate "IPv4 NAT"
        depends on NF_CONNTRACK_IPV4
        default m if NETFILTER_ADVANCED=n
+       select NF_NAT
        help
-         The Full NAT option allows masquerading, port forwarding and other
+         The IPv4 NAT option allows masquerading, port forwarding and other
          forms of full Network Address Port Translation.  It is controlled by
          the `nat' table in iptables: see the man page for iptables(8).
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config NF_NAT_NEEDED
-       bool
-       depends on NF_NAT
-       default y
+if NF_NAT_IPV4
 
 config IP_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
-       depends on NF_NAT
        default m if NETFILTER_ADVANCED=n
        help
          Masquerading is a special case of NAT: all outgoing connections are
@@ -174,30 +171,27 @@ config IP_NF_TARGET_MASQUERADE
 
 config IP_NF_TARGET_NETMAP
        tristate "NETMAP target support"
-       depends on NF_NAT
        depends on NETFILTER_ADVANCED
-       help
-         NETMAP is an implementation of static 1:1 NAT mapping of network
-         addresses. It maps the network address part, while keeping the host
-         address part intact.
-
-         To compile it as a module, choose M here.  If unsure, say N.
+       select NETFILTER_XT_TARGET_NETMAP
+       ---help---
+       This is a backwards-compat option for the user's convenience
+       (e.g. when running oldconfig). It selects
+       CONFIG_NETFILTER_XT_TARGET_NETMAP.
 
 config IP_NF_TARGET_REDIRECT
        tristate "REDIRECT target support"
-       depends on NF_NAT
        depends on NETFILTER_ADVANCED
-       help
-         REDIRECT is a special case of NAT: all incoming connections are
-         mapped onto the incoming interface's address, causing the packets to
-         come to the local machine instead of passing through.  This is
-         useful for transparent proxies.
+       select NETFILTER_XT_TARGET_REDIRECT
+       ---help---
+       This is a backwards-compat option for the user's convenience
+       (e.g. when running oldconfig). It selects
+       CONFIG_NETFILTER_XT_TARGET_REDIRECT.
 
-         To compile it as a module, choose M here.  If unsure, say N.
+endif
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
-       depends on NF_CONNTRACK_SNMP && NF_NAT
+       depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
        depends on NETFILTER_ADVANCED
        default NF_NAT && NF_CONNTRACK_SNMP
        ---help---
@@ -219,61 +213,21 @@ config NF_NAT_SNMP_BASIC
 #           <expr> '&&' <expr>                   (6)
 #
 # (6) Returns the result of min(/expr/, /expr/).
-config NF_NAT_PROTO_DCCP
-       tristate
-       depends on NF_NAT && NF_CT_PROTO_DCCP
-       default NF_NAT && NF_CT_PROTO_DCCP
 
 config NF_NAT_PROTO_GRE
        tristate
-       depends on NF_NAT && NF_CT_PROTO_GRE
-
-config NF_NAT_PROTO_UDPLITE
-       tristate
-       depends on NF_NAT && NF_CT_PROTO_UDPLITE
-       default NF_NAT && NF_CT_PROTO_UDPLITE
-
-config NF_NAT_PROTO_SCTP
-       tristate
-       default NF_NAT && NF_CT_PROTO_SCTP
-       depends on NF_NAT && NF_CT_PROTO_SCTP
-       select LIBCRC32C
-
-config NF_NAT_FTP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_FTP
-
-config NF_NAT_IRC
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_IRC
-
-config NF_NAT_TFTP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_TFTP
-
-config NF_NAT_AMANDA
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_AMANDA
+       depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
 
 config NF_NAT_PPTP
        tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_PPTP
+       depends on NF_CONNTRACK && NF_NAT_IPV4
+       default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
        select NF_NAT_PROTO_GRE
 
 config NF_NAT_H323
        tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_H323
-
-config NF_NAT_SIP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_SIP
+       depends on NF_CONNTRACK && NF_NAT_IPV4
+       default NF_NAT_IPV4 && NF_CONNTRACK_H323
 
 # mangle + specific targets
 config IP_NF_MANGLE
index c20674dc9452cefe0796d4cf45e93f005f965635..007b128eecc90246af3353f1e210bb07f267fe20 100644 (file)
@@ -10,32 +10,22 @@ nf_conntrack_ipv4-objs      += nf_conntrack_l3proto_ipv4_compat.o
 endif
 endif
 
-nf_nat-y               := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
-iptable_nat-y  := nf_nat_rule.o nf_nat_standalone.o
-
 # connection tracking
 obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
 
-obj-$(CONFIG_NF_NAT) += nf_nat.o
+nf_nat_ipv4-y          := nf_nat_l3proto_ipv4.o nf_nat_proto_icmp.o
+obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
 
 # defrag
 obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
 
 # NAT helpers (nf_conntrack)
-obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
-obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
-obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
-obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
 obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
-obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 
 # NAT protocols (nf_nat)
-obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
-obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
-obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
 
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
@@ -43,7 +33,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 # the three instances of ip_tables
 obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_NF_NAT) += iptable_nat.o
+obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
@@ -55,8 +45,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
 obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
 obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
-obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
-obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
 
index cbb6a1a6f6f794aa3f16561658812cd181a6f49c..5d5d4d1be9c2c7c2c951943e5a7d7ce7a4b940d4 100644 (file)
@@ -19,9 +19,9 @@
 #include <net/ip.h>
 #include <net/checksum.h>
 #include <net/route.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -49,7 +49,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        struct nf_conn *ct;
        struct nf_conn_nat *nat;
        enum ip_conntrack_info ctinfo;
-       struct nf_nat_ipv4_range newrange;
+       struct nf_nat_range newrange;
        const struct nf_nat_ipv4_multi_range_compat *mr;
        const struct rtable *rt;
        __be32 newsrc, nh;
@@ -80,10 +80,13 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        nat->masq_index = par->out->ifindex;
 
        /* Transfer from original range. */
-       newrange = ((struct nf_nat_ipv4_range)
-               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
-                 newsrc, newsrc,
-                 mr->range[0].min, mr->range[0].max });
+       memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+       memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+       newrange.flags       = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.ip = newsrc;
+       newrange.max_addr.ip = newsrc;
+       newrange.min_proto   = mr->range[0].min;
+       newrange.max_proto   = mr->range[0].max;
 
        /* Hand modified range to generic setup. */
        return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
@@ -96,7 +99,8 @@ device_cmp(struct nf_conn *i, void *ifindex)
 
        if (!nat)
                return 0;
-
+       if (nf_ct_l3num(i) != NFPROTO_IPV4)
+               return 0;
        return nat->masq_index == (int)(long)ifindex;
 }
 
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
deleted file mode 100644 (file)
index b5bfbba..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/* NETMAP - static NAT mapping of IP network addresses (1:1).
- * The mapping can be applied to source (POSTROUTING),
- * destination (PREROUTING), or both (with separate rules).
- */
-
-/* (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/ip.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat_rule.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>");
-MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
-
-static int netmap_tg_check(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
-               pr_debug("bad MAP_IPS.\n");
-               return -EINVAL;
-       }
-       if (mr->rangesize != 1) {
-               pr_debug("bad rangesize %u.\n", mr->rangesize);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static unsigned int
-netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       __be32 new_ip, netmask;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-       struct nf_nat_ipv4_range newrange;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-                    par->hooknum == NF_INET_POST_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_OUT ||
-                    par->hooknum == NF_INET_LOCAL_IN);
-       ct = nf_ct_get(skb, &ctinfo);
-
-       netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
-
-       if (par->hooknum == NF_INET_PRE_ROUTING ||
-           par->hooknum == NF_INET_LOCAL_OUT)
-               new_ip = ip_hdr(skb)->daddr & ~netmask;
-       else
-               new_ip = ip_hdr(skb)->saddr & ~netmask;
-       new_ip |= mr->range[0].min_ip & netmask;
-
-       newrange = ((struct nf_nat_ipv4_range)
-               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
-                 new_ip, new_ip,
-                 mr->range[0].min, mr->range[0].max });
-
-       /* Hand modified range to generic setup. */
-       return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
-}
-
-static struct xt_target netmap_tg_reg __read_mostly = {
-       .name           = "NETMAP",
-       .family         = NFPROTO_IPV4,
-       .target         = netmap_tg,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_PRE_ROUTING) |
-                         (1 << NF_INET_POST_ROUTING) |
-                         (1 << NF_INET_LOCAL_OUT) |
-                         (1 << NF_INET_LOCAL_IN),
-       .checkentry     = netmap_tg_check,
-       .me             = THIS_MODULE
-};
-
-static int __init netmap_tg_init(void)
-{
-       return xt_register_target(&netmap_tg_reg);
-}
-
-static void __exit netmap_tg_exit(void)
-{
-       xt_unregister_target(&netmap_tg_reg);
-}
-
-module_init(netmap_tg_init);
-module_exit(netmap_tg_exit);
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
deleted file mode 100644 (file)
index 7c0103a..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/* Redirect.  Simple mapping which alters dst to a local IP address. */
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-#include <linux/if.h>
-#include <linux/inetdevice.h>
-#include <net/protocol.h>
-#include <net/checksum.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat_rule.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
-
-/* FIXME: Take multiple ranges --RR */
-static int redirect_tg_check(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
-               pr_debug("bad MAP_IPS.\n");
-               return -EINVAL;
-       }
-       if (mr->rangesize != 1) {
-               pr_debug("bad rangesize %u.\n", mr->rangesize);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static unsigned int
-redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       __be32 newdst;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-       struct nf_nat_ipv4_range newrange;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_OUT);
-
-       ct = nf_ct_get(skb, &ctinfo);
-       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
-       /* Local packets: make them go to loopback */
-       if (par->hooknum == NF_INET_LOCAL_OUT)
-               newdst = htonl(0x7F000001);
-       else {
-               struct in_device *indev;
-               struct in_ifaddr *ifa;
-
-               newdst = 0;
-
-               rcu_read_lock();
-               indev = __in_dev_get_rcu(skb->dev);
-               if (indev && (ifa = indev->ifa_list))
-                       newdst = ifa->ifa_local;
-               rcu_read_unlock();
-
-               if (!newdst)
-                       return NF_DROP;
-       }
-
-       /* Transfer from original range. */
-       newrange = ((struct nf_nat_ipv4_range)
-               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
-                 newdst, newdst,
-                 mr->range[0].min, mr->range[0].max });
-
-       /* Hand modified range to generic setup. */
-       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
-}
-
-static struct xt_target redirect_tg_reg __read_mostly = {
-       .name           = "REDIRECT",
-       .family         = NFPROTO_IPV4,
-       .target         = redirect_tg,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
-       .checkentry     = redirect_tg_check,
-       .me             = THIS_MODULE,
-};
-
-static int __init redirect_tg_init(void)
-{
-       return xt_register_target(&redirect_tg_reg);
-}
-
-static void __exit redirect_tg_exit(void)
-{
-       xt_unregister_target(&redirect_tg_reg);
-}
-
-module_init(redirect_tg_init);
-module_exit(redirect_tg_exit);
index 1109f7f6c25433d64515180eb6c9ff599dcef0f0..b5ef3cba225046fdc142bf5029954e02fcb08df1 100644 (file)
@@ -396,8 +396,7 @@ static int __init ulog_tg_init(void)
        for (i = 0; i < ULOG_MAXNLGROUPS; i++)
                setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
 
-       nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
-                                       THIS_MODULE, &cfg);
+       nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
        if (!nflognl)
                return -ENOMEM;
 
index 31371be8174be1d8da1a50f11dd6c48409b29b74..c30130062cd6515f31d7497eaa6a403d2b1d629d 100644 (file)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        return ipv4_is_local_multicast(iph->daddr) ^ invert;
                flow.flowi4_iif = 0;
        } else {
-               flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+               flow.flowi4_iif = LOOPBACK_IFINDEX;
        }
 
        flow.daddr = iph->saddr;
index 851acec852d284bbe61f7fc9fd9b1cf2ab4bb1f1..6b3da5cf54e96d99054170c77a911e3af9e9d139 100644 (file)
@@ -69,9 +69,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
        net->ipv4.iptable_filter =
                ipt_register_table(net, &packet_filter, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_filter))
-               return PTR_ERR(net->ipv4.iptable_filter);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_filter);
 }
 
 static void __net_exit iptable_filter_net_exit(struct net *net)
@@ -96,14 +94,10 @@ static int __init iptable_filter_init(void)
        filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
        if (IS_ERR(filter_ops)) {
                ret = PTR_ERR(filter_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_filter_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_filter_net_ops);
-       return ret;
 }
 
 static void __exit iptable_filter_fini(void)
index aef5d1fbe77dc39b5e6b951a97897e02ead47a62..85d88f20644701f63ad9b8250d9fd391c108ee55 100644 (file)
@@ -104,9 +104,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
        net->ipv4.iptable_mangle =
                ipt_register_table(net, &packet_mangler, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_mangle))
-               return PTR_ERR(net->ipv4.iptable_mangle);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_mangle);
 }
 
 static void __net_exit iptable_mangle_net_exit(struct net *net)
@@ -131,14 +129,10 @@ static int __init iptable_mangle_init(void)
        mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
        if (IS_ERR(mangle_ops)) {
                ret = PTR_ERR(mangle_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_mangle_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_mangle_net_ops);
-       return ret;
 }
 
 static void __exit iptable_mangle_fini(void)
similarity index 52%
rename from net/ipv4/netfilter/nf_nat_standalone.c
rename to net/ipv4/netfilter/iptable_nat.c
index 3828a4229822951b8ad71839ac6241a9a2b19fbf..9e0ffaf1d942624cf44d9693922e13312fc77ff2 100644 (file)
@@ -1,84 +1,71 @@
 /* (C) 1999-2001 Paul `Rusty' Russell
  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/types.h>
-#include <linux/icmp.h>
-#include <linux/gfp.h>
-#include <linux/ip.h>
+
+#include <linux/module.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/ip.h>
 #include <net/ip.h>
-#include <net/checksum.h>
-#include <linux/spinlock.h>
 
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+
+static const struct xt_table nf_nat_ipv4_table = {
+       .name           = "nat",
+       .valid_hooks    = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .me             = THIS_MODULE,
+       .af             = NFPROTO_IPV4,
+};
 
-#ifdef CONFIG_XFRM
-static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
+static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 {
-       struct flowi4 *fl4 = &fl->u.ip4;
-       const struct nf_conn *ct;
-       const struct nf_conntrack_tuple *t;
-       enum ip_conntrack_info ctinfo;
-       enum ip_conntrack_dir dir;
-       unsigned long statusbit;
-
-       ct = nf_ct_get(skb, &ctinfo);
-       if (ct == NULL)
-               return;
-       dir = CTINFO2DIR(ctinfo);
-       t = &ct->tuplehash[dir].tuple;
-
-       if (dir == IP_CT_DIR_ORIGINAL)
-               statusbit = IPS_DST_NAT;
-       else
-               statusbit = IPS_SRC_NAT;
-
-       if (ct->status & statusbit) {
-               fl4->daddr = t->dst.u3.ip;
-               if (t->dst.protonum == IPPROTO_TCP ||
-                   t->dst.protonum == IPPROTO_UDP ||
-                   t->dst.protonum == IPPROTO_UDPLITE ||
-                   t->dst.protonum == IPPROTO_DCCP ||
-                   t->dst.protonum == IPPROTO_SCTP)
-                       fl4->fl4_dport = t->dst.u.tcp.port;
-       }
+       /* Force range to this IP; let proto decide mapping for
+        * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+        */
+       struct nf_nat_range range;
+
+       range.flags = 0;
+       pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
+                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
+
+       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
 
-       statusbit ^= IPS_NAT_MASK;
+static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
+                                    const struct net_device *in,
+                                    const struct net_device *out,
+                                    struct nf_conn *ct)
+{
+       struct net *net = nf_ct_net(ct);
+       unsigned int ret;
 
-       if (ct->status & statusbit) {
-               fl4->saddr = t->src.u3.ip;
-               if (t->dst.protonum == IPPROTO_TCP ||
-                   t->dst.protonum == IPPROTO_UDP ||
-                   t->dst.protonum == IPPROTO_UDPLITE ||
-                   t->dst.protonum == IPPROTO_DCCP ||
-                   t->dst.protonum == IPPROTO_SCTP)
-                       fl4->fl4_sport = t->src.u.tcp.port;
+       ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
+       if (ret == NF_ACCEPT) {
+               if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
+                       ret = alloc_null_binding(ct, hooknum);
        }
+       return ret;
 }
-#endif
 
 static unsigned int
-nf_nat_fn(unsigned int hooknum,
-         struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         int (*okfn)(struct sk_buff *))
+nf_nat_ipv4_fn(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -87,14 +74,16 @@ nf_nat_fn(unsigned int hooknum,
        enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
 
        /* We never see fragments: conntrack defrags on pre-routing
-          and local-out, and nf_nat_out protects post-routing. */
+        * and local-out, and nf_nat_out protects post-routing.
+        */
        NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
 
        ct = nf_ct_get(skb, &ctinfo);
        /* Can't track?  It's not due to stress, or conntrack would
-          have dropped it.  Hence it's the user's responsibilty to
-          packet filter it out, or implement conntrack/NAT for that
-          protocol. 8) --RR */
+        * have dropped it.  Hence it's the user's responsibilty to
+        * packet filter it out, or implement conntrack/NAT for that
+        * protocol. 8) --RR
+        */
        if (!ct)
                return NF_ACCEPT;
 
@@ -118,17 +107,17 @@ nf_nat_fn(unsigned int hooknum,
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
-                       if (!nf_nat_icmp_reply_translation(ct, ctinfo,
-                                                          hooknum, skb))
+                       if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+                                                          hooknum))
                                return NF_DROP;
                        else
                                return NF_ACCEPT;
                }
                /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
        case IP_CT_NEW:
-
                /* Seen it before?  This can happen for loopback, retrans,
-                  or local packets.. */
+                * or local packets.
+                */
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
@@ -151,16 +140,16 @@ nf_nat_fn(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_in(unsigned int hooknum,
-         struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         int (*okfn)(struct sk_buff *))
+nf_nat_ipv4_in(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
 {
        unsigned int ret;
        __be32 daddr = ip_hdr(skb)->daddr;
 
-       ret = nf_nat_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            daddr != ip_hdr(skb)->daddr)
                skb_dst_drop(skb);
@@ -169,11 +158,11 @@ nf_nat_in(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_out(unsigned int hooknum,
-          struct sk_buff *skb,
-          const struct net_device *in,
-          const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
+nf_nat_ipv4_out(unsigned int hooknum,
+               struct sk_buff *skb,
+               const struct net_device *in,
+               const struct net_device *out,
+               int (*okfn)(struct sk_buff *))
 {
 #ifdef CONFIG_XFRM
        const struct nf_conn *ct;
@@ -186,29 +175,30 @@ nf_nat_out(unsigned int hooknum,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
+           !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
                if ((ct->tuplehash[dir].tuple.src.u3.ip !=
                     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
                    (ct->tuplehash[dir].tuple.src.u.all !=
-                    ct->tuplehash[!dir].tuple.dst.u.all)
-                  )
-                       return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
+                    ct->tuplehash[!dir].tuple.dst.u.all))
+                       if (nf_xfrm_me_harder(skb, AF_INET) < 0)
+                               ret = NF_DROP;
        }
 #endif
        return ret;
 }
 
 static unsigned int
-nf_nat_local_fn(unsigned int hooknum,
-               struct sk_buff *skb,
-               const struct net_device *in,
-               const struct net_device *out,
-               int (*okfn)(struct sk_buff *))
+nf_nat_ipv4_local_fn(unsigned int hooknum,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out,
+                    int (*okfn)(struct sk_buff *))
 {
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -219,7 +209,7 @@ nf_nat_local_fn(unsigned int hooknum,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -230,21 +220,20 @@ nf_nat_local_fn(unsigned int hooknum,
                                ret = NF_DROP;
                }
 #ifdef CONFIG_XFRM
-               else if (ct->tuplehash[dir].tuple.dst.u.all !=
+               else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+                        ct->tuplehash[dir].tuple.dst.u.all !=
                         ct->tuplehash[!dir].tuple.src.u.all)
-                       if (ip_xfrm_me_harder(skb))
+                       if (nf_xfrm_me_harder(skb, AF_INET) < 0)
                                ret = NF_DROP;
 #endif
        }
        return ret;
 }
 
-/* We must be after connection tracking and before packet filtering. */
-
-static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
+static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
        /* Before packet filtering, change destination */
        {
-               .hook           = nf_nat_in,
+               .hook           = nf_nat_ipv4_in,
                .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_PRE_ROUTING,
@@ -252,7 +241,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
        },
        /* After packet filtering, change source */
        {
-               .hook           = nf_nat_out,
+               .hook           = nf_nat_ipv4_out,
                .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_POST_ROUTING,
@@ -260,7 +249,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
        },
        /* Before packet filtering, change destination */
        {
-               .hook           = nf_nat_local_fn,
+               .hook           = nf_nat_ipv4_local_fn,
                .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_OUT,
@@ -268,7 +257,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
        },
        /* After packet filtering, change source */
        {
-               .hook           = nf_nat_fn,
+               .hook           = nf_nat_ipv4_fn,
                .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
@@ -276,51 +265,56 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
        },
 };
 
-static int __init nf_nat_standalone_init(void)
+static int __net_init iptable_nat_net_init(struct net *net)
 {
-       int ret = 0;
+       struct ipt_replace *repl;
+
+       repl = ipt_alloc_initial_table(&nf_nat_ipv4_table);
+       if (repl == NULL)
+               return -ENOMEM;
+       net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
+       kfree(repl);
+       if (IS_ERR(net->ipv4.nat_table))
+               return PTR_ERR(net->ipv4.nat_table);
+       return 0;
+}
 
-       need_ipv4_conntrack();
+static void __net_exit iptable_nat_net_exit(struct net *net)
+{
+       ipt_unregister_table(net, net->ipv4.nat_table);
+}
 
-#ifdef CONFIG_XFRM
-       BUG_ON(ip_nat_decode_session != NULL);
-       RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
-#endif
-       ret = nf_nat_rule_init();
-       if (ret < 0) {
-               pr_err("nf_nat_init: can't setup rules.\n");
-               goto cleanup_decode_session;
-       }
-       ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
-       if (ret < 0) {
-               pr_err("nf_nat_init: can't register hooks.\n");
-               goto cleanup_rule_init;
-       }
-       return ret;
+static struct pernet_operations iptable_nat_net_ops = {
+       .init   = iptable_nat_net_init,
+       .exit   = iptable_nat_net_exit,
+};
 
- cleanup_rule_init:
-       nf_nat_rule_cleanup();
- cleanup_decode_session:
-#ifdef CONFIG_XFRM
-       RCU_INIT_POINTER(ip_nat_decode_session, NULL);
-       synchronize_net();
-#endif
-       return ret;
+static int __init iptable_nat_init(void)
+{
+       int err;
+
+       err = register_pernet_subsys(&iptable_nat_net_ops);
+       if (err < 0)
+               goto err1;
+
+       err = nf_register_hooks(nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       unregister_pernet_subsys(&iptable_nat_net_ops);
+err1:
+       return err;
 }
 
-static void __exit nf_nat_standalone_fini(void)
+static void __exit iptable_nat_exit(void)
 {
-       nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
-       nf_nat_rule_cleanup();
-#ifdef CONFIG_XFRM
-       RCU_INIT_POINTER(ip_nat_decode_session, NULL);
-       synchronize_net();
-#endif
-       /* Conntrack caches are unregistered in nf_conntrack_cleanup */
+       nf_unregister_hooks(nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       unregister_pernet_subsys(&iptable_nat_net_ops);
 }
 
-module_init(nf_nat_standalone_init);
-module_exit(nf_nat_standalone_fini);
+module_init(iptable_nat_init);
+module_exit(iptable_nat_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("ip_nat");
index 07fb710cd722f329ea297b764f50cfeb0ad8175e..03d9696d3c6eb27b24eed32ad3fb0c4e069d9bf3 100644 (file)
@@ -48,9 +48,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
        net->ipv4.iptable_raw =
                ipt_register_table(net, &packet_raw, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_raw))
-               return PTR_ERR(net->ipv4.iptable_raw);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_raw);
 }
 
 static void __net_exit iptable_raw_net_exit(struct net *net)
@@ -75,14 +73,10 @@ static int __init iptable_raw_init(void)
        rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
        if (IS_ERR(rawtable_ops)) {
                ret = PTR_ERR(rawtable_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_raw_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_raw_net_ops);
-       return ret;
 }
 
 static void __exit iptable_raw_fini(void)
index be45bdc4c60251a0936e8e5f7d0c6ea56d0e6eec..b283d8e2601abfadb80c1024fc7558862784f7b2 100644 (file)
@@ -66,10 +66,7 @@ static int __net_init iptable_security_net_init(struct net *net)
        net->ipv4.iptable_security =
                ipt_register_table(net, &security_table, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_security))
-               return PTR_ERR(net->ipv4.iptable_security);
-
-       return 0;
+       return PTR_RET(net->ipv4.iptable_security);
 }
 
 static void __net_exit iptable_security_net_exit(struct net *net)
index e7ff2dcab6cec0fdd82cfb5ce804e172b67adc1f..fcdd0c2406e6d85d888633222e6697bd2352db86 100644 (file)
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 #include <net/netfilter/nf_log.h>
 
-int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
-                             struct nf_conn *ct,
-                             enum ip_conntrack_info ctinfo);
-EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
-
 static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
                              struct nf_conntrack_tuple *tuple)
 {
@@ -149,7 +144,8 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
                typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
                seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
-               if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) {
+               if (!seq_adjust ||
+                   !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
                        NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
                        return NF_DROP;
                }
index c6784a18c1c45f3f01ce67bef5f9214a9ab82385..9c3db10b22d33862fd1ceea6382a85b57cc44e6d 100644 (file)
 
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <linux/netfilter/nf_conntrack_h323.h>
 
 /****************************************************************************/
-static int set_addr(struct sk_buff *skb,
+static int set_addr(struct sk_buff *skb, unsigned int protoff,
                    unsigned char **data, int dataoff,
                    unsigned int addroff, __be32 ip, __be16 port)
 {
@@ -40,7 +39,7 @@ static int set_addr(struct sk_buff *skb,
 
        if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
                if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
-                                             addroff, sizeof(buf),
+                                             protoff, addroff, sizeof(buf),
                                              (char *) &buf, sizeof(buf))) {
                        net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
                        return -1;
@@ -54,7 +53,7 @@ static int set_addr(struct sk_buff *skb,
                *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff;
        } else {
                if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
-                                             addroff, sizeof(buf),
+                                             protoff, addroff, sizeof(buf),
                                              (char *) &buf, sizeof(buf))) {
                        net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
                        return -1;
@@ -69,22 +68,22 @@ static int set_addr(struct sk_buff *skb,
 }
 
 /****************************************************************************/
-static int set_h225_addr(struct sk_buff *skb,
+static int set_h225_addr(struct sk_buff *skb, unsigned int protoff,
                         unsigned char **data, int dataoff,
                         TransportAddress *taddr,
                         union nf_inet_addr *addr, __be16 port)
 {
-       return set_addr(skb, data, dataoff, taddr->ipAddress.ip,
+       return set_addr(skb, protoff, data, dataoff, taddr->ipAddress.ip,
                        addr->ip, port);
 }
 
 /****************************************************************************/
-static int set_h245_addr(struct sk_buff *skb,
+static int set_h245_addr(struct sk_buff *skb, unsigned int protoff,
                         unsigned char **data, int dataoff,
                         H245_TransportAddress *taddr,
                         union nf_inet_addr *addr, __be16 port)
 {
-       return set_addr(skb, data, dataoff,
+       return set_addr(skb, protoff, data, dataoff,
                        taddr->unicastAddress.iPAddress.network,
                        addr->ip, port);
 }
@@ -92,7 +91,7 @@ static int set_h245_addr(struct sk_buff *skb,
 /****************************************************************************/
 static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data,
+                       unsigned int protoff, unsigned char **data,
                        TransportAddress *taddr, int count)
 {
        const struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -118,7 +117,8 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
                                         &addr.ip, port,
                                         &ct->tuplehash[!dir].tuple.dst.u3.ip,
                                         info->sig_port[!dir]);
-                               return set_h225_addr(skb, data, 0, &taddr[i],
+                               return set_h225_addr(skb, protoff, data, 0,
+                                                    &taddr[i],
                                                     &ct->tuplehash[!dir].
                                                     tuple.dst.u3,
                                                     info->sig_port[!dir]);
@@ -129,7 +129,8 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
                                         &addr.ip, port,
                                         &ct->tuplehash[!dir].tuple.src.u3.ip,
                                         info->sig_port[!dir]);
-                               return set_h225_addr(skb, data, 0, &taddr[i],
+                               return set_h225_addr(skb, protoff, data, 0,
+                                                    &taddr[i],
                                                     &ct->tuplehash[!dir].
                                                     tuple.src.u3,
                                                     info->sig_port[!dir]);
@@ -143,7 +144,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data,
+                       unsigned int protoff, unsigned char **data,
                        TransportAddress *taddr, int count)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -159,7 +160,7 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
                                 &addr.ip, ntohs(port),
                                 &ct->tuplehash[!dir].tuple.dst.u3.ip,
                                 ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port));
-                       return set_h225_addr(skb, data, 0, &taddr[i],
+                       return set_h225_addr(skb, protoff, data, 0, &taddr[i],
                                             &ct->tuplehash[!dir].tuple.dst.u3,
                                             ct->tuplehash[!dir].tuple.
                                                                dst.u.udp.port);
@@ -172,7 +173,7 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
+                       unsigned int protoff, unsigned char **data, int dataoff,
                        H245_TransportAddress *taddr,
                        __be16 port, __be16 rtp_port,
                        struct nf_conntrack_expect *rtp_exp,
@@ -244,7 +245,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h245_addr(skb, data, dataoff, taddr,
+       if (set_h245_addr(skb, protoff, data, dataoff, taddr,
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons((port & htons(1)) ? nated_port + 1 :
                                                    nated_port)) == 0) {
@@ -275,7 +276,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
                    enum ip_conntrack_info ctinfo,
-                   unsigned char **data, int dataoff,
+                   unsigned int protoff, unsigned char **data, int dataoff,
                    H245_TransportAddress *taddr, __be16 port,
                    struct nf_conntrack_expect *exp)
 {
@@ -307,7 +308,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h245_addr(skb, data, dataoff, taddr,
+       if (set_h245_addr(skb, protoff, data, dataoff, taddr,
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons(nated_port)) < 0) {
                nf_ct_unexpect_related(exp);
@@ -326,7 +327,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
                    enum ip_conntrack_info ctinfo,
-                   unsigned char **data, int dataoff,
+                   unsigned int protoff, unsigned char **data, int dataoff,
                    TransportAddress *taddr, __be16 port,
                    struct nf_conntrack_expect *exp)
 {
@@ -363,7 +364,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h225_addr(skb, data, dataoff, taddr,
+       if (set_h225_addr(skb, protoff, data, dataoff, taddr,
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons(nated_port)) == 0) {
                /* Save ports */
@@ -390,7 +391,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
 static void ip_nat_q931_expect(struct nf_conn *new,
                               struct nf_conntrack_expect *this)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        if (this->tuple.src.u3.ip != 0) {       /* Only accept calls from GK */
                nf_nat_follow_master(new, this);
@@ -402,21 +403,23 @@ static void ip_nat_q931_expect(struct nf_conn *new,
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
+       range.min_addr = range.max_addr =
+           new->tuplehash[!this->dir].tuple.src.u3;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = this->saved_proto;
-       range.min_ip = range.max_ip =
-           new->master->tuplehash[!this->dir].tuple.src.u3.ip;
+       range.min_proto = range.max_proto = this->saved_proto;
+       range.min_addr = range.max_addr =
+           new->master->tuplehash[!this->dir].tuple.src.u3;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
 static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
                    enum ip_conntrack_info ctinfo,
-                   unsigned char **data, TransportAddress *taddr, int idx,
+                   unsigned int protoff, unsigned char **data,
+                   TransportAddress *taddr, int idx,
                    __be16 port, struct nf_conntrack_expect *exp)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -453,7 +456,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h225_addr(skb, data, 0, &taddr[idx],
+       if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons(nated_port)) == 0) {
                /* Save ports */
@@ -464,7 +467,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
                if (idx > 0 &&
                    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
                    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
-                       set_h225_addr(skb, data, 0, &taddr[0],
+                       set_h225_addr(skb, protoff, data, 0, &taddr[0],
                                      &ct->tuplehash[!dir].tuple.dst.u3,
                                      info->sig_port[!dir]);
                }
@@ -487,26 +490,28 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
 static void ip_nat_callforwarding_expect(struct nf_conn *new,
                                         struct nf_conntrack_expect *this)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        /* This must be a fresh one. */
        BUG_ON(new->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
+       range.min_addr = range.max_addr =
+           new->tuplehash[!this->dir].tuple.src.u3;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = this->saved_proto;
-       range.min_ip = range.max_ip = this->saved_ip;
+       range.min_proto = range.max_proto = this->saved_proto;
+       range.min_addr = range.max_addr = this->saved_addr;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
 static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
                              enum ip_conntrack_info ctinfo,
+                             unsigned int protoff,
                              unsigned char **data, int dataoff,
                              TransportAddress *taddr, __be16 port,
                              struct nf_conntrack_expect *exp)
@@ -515,7 +520,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
        u_int16_t nated_port;
 
        /* Set expectations for NAT */
-       exp->saved_ip = exp->tuple.dst.u3.ip;
+       exp->saved_addr = exp->tuple.dst.u3;
        exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
        exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
        exp->expectfn = ip_nat_callforwarding_expect;
@@ -541,7 +546,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (!set_h225_addr(skb, data, dataoff, taddr,
+       if (!set_h225_addr(skb, protoff, data, dataoff, taddr,
                           &ct->tuplehash[!dir].tuple.dst.u3,
                           htons(nated_port)) == 0) {
                nf_ct_unexpect_related(exp);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
new file mode 100644 (file)
index 0000000..d8b2e14
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/secure_seq.h>
+#include <net/checksum.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;
+
+#ifdef CONFIG_XFRM
+static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
+                                      const struct nf_conn *ct,
+                                      enum ip_conntrack_dir dir,
+                                      unsigned long statusbit,
+                                      struct flowi *fl)
+{
+       const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
+       struct flowi4 *fl4 = &fl->u.ip4;
+
+       if (ct->status & statusbit) {
+               fl4->daddr = t->dst.u3.ip;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl4->fl4_dport = t->dst.u.all;
+       }
+
+       statusbit ^= IPS_NAT_MASK;
+
+       if (ct->status & statusbit) {
+               fl4->saddr = t->src.u3.ip;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl4->fl4_sport = t->src.u.all;
+       }
+}
+#endif /* CONFIG_XFRM */
+
+static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
+                                const struct nf_nat_range *range)
+{
+       return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
+              ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
+}
+
+static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
+                                  __be16 dport)
+{
+       return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
+}
+
+static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
+                                 unsigned int iphdroff,
+                                 const struct nf_nat_l4proto *l4proto,
+                                 const struct nf_conntrack_tuple *target,
+                                 enum nf_nat_manip_type maniptype)
+{
+       struct iphdr *iph;
+       unsigned int hdroff;
+
+       if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
+               return false;
+
+       iph = (void *)skb->data + iphdroff;
+       hdroff = iphdroff + iph->ihl * 4;
+
+       if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
+                               target, maniptype))
+               return false;
+       iph = (void *)skb->data + iphdroff;
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
+               iph->saddr = target->src.u3.ip;
+       } else {
+               csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
+               iph->daddr = target->dst.u3.ip;
+       }
+       return true;
+}
+
+static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
+                                   unsigned int iphdroff, __sum16 *check,
+                                   const struct nf_conntrack_tuple *t,
+                                   enum nf_nat_manip_type maniptype)
+{
+       struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
+       __be32 oldip, newip;
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               oldip = iph->saddr;
+               newip = t->src.u3.ip;
+       } else {
+               oldip = iph->daddr;
+               newip = t->dst.u3.ip;
+       }
+       inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+}
+
+static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
+                                   u8 proto, void *data, __sum16 *check,
+                                   int datalen, int oldlen)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct rtable *rt = skb_rtable(skb);
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               if (!(rt->rt_flags & RTCF_LOCAL) &&
+                   (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
+                       skb->ip_summed = CHECKSUM_PARTIAL;
+                       skb->csum_start = skb_headroom(skb) +
+                                         skb_network_offset(skb) +
+                                         ip_hdrlen(skb);
+                       skb->csum_offset = (void *)check - data;
+                       *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                   datalen, proto, 0);
+               } else {
+                       *check = 0;
+                       *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                  datalen, proto,
+                                                  csum_partial(data, datalen,
+                                                               0));
+                       if (proto == IPPROTO_UDP && !*check)
+                               *check = CSUM_MANGLED_0;
+               }
+       } else
+               inet_proto_csum_replace2(check, skb,
+                                        htons(oldlen), htons(datalen), 1);
+}
+
+static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
+                                      struct nf_nat_range *range)
+{
+       if (tb[CTA_NAT_V4_MINIP]) {
+               range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
+               range->flags |= NF_NAT_RANGE_MAP_IPS;
+       }
+
+       if (tb[CTA_NAT_V4_MAXIP])
+               range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
+       else
+               range->max_addr.ip = range->min_addr.ip;
+
+       return 0;
+}
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
+       .l3proto                = NFPROTO_IPV4,
+       .in_range               = nf_nat_ipv4_in_range,
+       .secure_port            = nf_nat_ipv4_secure_port,
+       .manip_pkt              = nf_nat_ipv4_manip_pkt,
+       .csum_update            = nf_nat_ipv4_csum_update,
+       .csum_recalc            = nf_nat_ipv4_csum_recalc,
+       .nlattr_to_range        = nf_nat_ipv4_nlattr_to_range,
+#ifdef CONFIG_XFRM
+       .decode_session         = nf_nat_ipv4_decode_session,
+#endif
+};
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb,
+                                 struct nf_conn *ct,
+                                 enum ip_conntrack_info ctinfo,
+                                 unsigned int hooknum)
+{
+       struct {
+               struct icmphdr  icmp;
+               struct iphdr    ip;
+       } *inside;
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+       unsigned int hdrlen = ip_hdrlen(skb);
+       const struct nf_nat_l4proto *l4proto;
+       struct nf_conntrack_tuple target;
+       unsigned long statusbit;
+
+       NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
+
+       if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+               return 0;
+       if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+               return 0;
+
+       inside = (void *)skb->data + hdrlen;
+       if (inside->icmp.type == ICMP_REDIRECT) {
+               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+                       return 0;
+               if (ct->status & IPS_NAT_MASK)
+                       return 0;
+       }
+
+       if (manip == NF_NAT_MANIP_SRC)
+               statusbit = IPS_SRC_NAT;
+       else
+               statusbit = IPS_DST_NAT;
+
+       /* Invert if this is reply direction */
+       if (dir == IP_CT_DIR_REPLY)
+               statusbit ^= IPS_NAT_MASK;
+
+       if (!(ct->status & statusbit))
+               return 1;
+
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
+       if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
+                                  l4proto, &ct->tuplehash[!dir].tuple, !manip))
+               return 0;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               /* Reloading "inside" here since manip_pkt may reallocate */
+               inside = (void *)skb->data + hdrlen;
+               inside->icmp.checksum = 0;
+               inside->icmp.checksum =
+                       csum_fold(skb_checksum(skb, hdrlen,
+                                              skb->len - hdrlen, 0));
+       }
+
+       /* Change outer to look like the reply to an incoming packet */
+       nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
+       if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
+               return 0;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
+
+static int __init nf_nat_l3proto_ipv4_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
+       if (err < 0)
+               goto err2;
+       return err;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
+err1:
+       return err;
+}
+
+static void __exit nf_nat_l3proto_ipv4_exit(void)
+{
+       nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("nf-nat-" __stringify(AF_INET));
+
+module_init(nf_nat_l3proto_ipv4_init);
+module_exit(nf_nat_l3proto_ipv4_exit);
index 388140881ebe2eac05ed78a8ee68de518cfddef8..a06d7d74817d3976d5cb15147036a3270941dc80 100644 (file)
@@ -22,7 +22,6 @@
 
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_zones.h>
@@ -47,7 +46,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
        struct nf_conntrack_tuple t;
        const struct nf_ct_pptp_master *ct_pptp_info;
        const struct nf_nat_pptp *nat_pptp_info;
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        ct_pptp_info = nfct_help_data(master);
        nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
@@ -89,21 +88,21 @@ static void pptp_nat_expected(struct nf_conn *ct,
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
        if (exp->dir == IP_CT_DIR_ORIGINAL) {
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
-               range.min = range.max = exp->saved_proto;
+               range.min_proto = range.max_proto = exp->saved_proto;
        }
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.src.u3;
        if (exp->dir == IP_CT_DIR_REPLY) {
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
-               range.min = range.max = exp->saved_proto;
+               range.min_proto = range.max_proto = exp->saved_proto;
        }
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
@@ -113,6 +112,7 @@ static int
 pptp_outbound_pkt(struct sk_buff *skb,
                  struct nf_conn *ct,
                  enum ip_conntrack_info ctinfo,
+                 unsigned int protoff,
                  struct PptpControlHeader *ctlh,
                  union pptp_ctrl_union *pptpReq)
 
@@ -175,7 +175,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
                 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
 
        /* mangle packet */
-       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                     cid_off + sizeof(struct pptp_pkt_hdr) +
                                     sizeof(struct PptpControlHeader),
                                     sizeof(new_callid), (char *)&new_callid,
@@ -216,6 +216,7 @@ static int
 pptp_inbound_pkt(struct sk_buff *skb,
                 struct nf_conn *ct,
                 enum ip_conntrack_info ctinfo,
+                unsigned int protoff,
                 struct PptpControlHeader *ctlh,
                 union pptp_ctrl_union *pptpReq)
 {
@@ -268,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
        pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
                 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
 
-       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                     pcid_off + sizeof(struct pptp_pkt_hdr) +
                                     sizeof(struct PptpControlHeader),
                                     sizeof(new_pcid), (char *)&new_pcid,
index 46ba0b9ab985b70ac5c80883a642d3048ac3cf83..ea44f02563b5dc5275bb13484fae50b207079b96 100644 (file)
@@ -28,8 +28,7 @@
 #include <linux/ip.h>
 
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <linux/netfilter/nf_conntrack_proto_gre.h>
 
 MODULE_LICENSE("GPL");
@@ -38,8 +37,9 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
 
 /* generate unique tuple ... */
 static void
-gre_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_ipv4_range *range,
+gre_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                struct nf_conntrack_tuple *tuple,
+                const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
@@ -62,8 +62,8 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
                min = 1;
                range_size = 0xffff;
        } else {
-               min = ntohs(range->min.gre.key);
-               range_size = ntohs(range->max.gre.key) - min + 1;
+               min = ntohs(range->min_proto.gre.key);
+               range_size = ntohs(range->max_proto.gre.key) - min + 1;
        }
 
        pr_debug("min = %u, range_size = %u\n", min, range_size);
@@ -80,14 +80,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 
 /* manipulate a GRE packet according to maniptype */
 static bool
-gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
+gre_manip_pkt(struct sk_buff *skb,
+             const struct nf_nat_l3proto *l3proto,
+             unsigned int iphdroff, unsigned int hdroff,
              const struct nf_conntrack_tuple *tuple,
              enum nf_nat_manip_type maniptype)
 {
        const struct gre_hdr *greh;
        struct gre_hdr_pptp *pgreh;
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
-       unsigned int hdroff = iphdroff + iph->ihl * 4;
 
        /* pgreh includes two optional 32bit fields which are not required
         * to be there.  That's where the magic '8' comes from */
@@ -117,24 +117,24 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
        return true;
 }
 
-static const struct nf_nat_protocol gre = {
-       .protonum               = IPPROTO_GRE,
+static const struct nf_nat_l4proto gre = {
+       .l4proto                = IPPROTO_GRE,
        .manip_pkt              = gre_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = gre_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
 
 static int __init nf_nat_proto_gre_init(void)
 {
-       return nf_nat_protocol_register(&gre);
+       return nf_nat_l4proto_register(NFPROTO_IPV4, &gre);
 }
 
 static void __exit nf_nat_proto_gre_fini(void)
 {
-       nf_nat_protocol_unregister(&gre);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &gre);
 }
 
 module_init(nf_nat_proto_gre_init);
index b35172851bae8b92094bff8dc0da833968e43f39..eb303471bcf6c252c2017061fd866e4ccf6a1fd1 100644 (file)
@@ -15,8 +15,7 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
 static bool
 icmp_in_range(const struct nf_conntrack_tuple *tuple,
@@ -29,8 +28,9 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
 }
 
 static void
-icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_ipv4_range *range,
+icmp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                 struct nf_conntrack_tuple *tuple,
+                 const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
@@ -38,13 +38,14 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
        unsigned int range_size;
        unsigned int i;
 
-       range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
+       range_size = ntohs(range->max_proto.icmp.id) -
+                    ntohs(range->min_proto.icmp.id) + 1;
        /* If no range specified... */
        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
                range_size = 0xFFFF;
 
        for (i = 0; ; ++id) {
-               tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
+               tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
                                             (id % range_size));
                if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
                        return;
@@ -54,13 +55,12 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 
 static bool
 icmp_manip_pkt(struct sk_buff *skb,
-              unsigned int iphdroff,
+              const struct nf_nat_l3proto *l3proto,
+              unsigned int iphdroff, unsigned int hdroff,
               const struct nf_conntrack_tuple *tuple,
               enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        struct icmphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
 
        if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
                return false;
@@ -72,12 +72,12 @@ icmp_manip_pkt(struct sk_buff *skb,
        return true;
 }
 
-const struct nf_nat_protocol nf_nat_protocol_icmp = {
-       .protonum               = IPPROTO_ICMP,
+const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
+       .l4proto                = IPPROTO_ICMP,
        .manip_pkt              = icmp_manip_pkt,
        .in_range               = icmp_in_range,
        .unique_tuple           = icmp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
deleted file mode 100644 (file)
index d2a9dc3..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Everything about the rules for NAT. */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <net/checksum.h>
-#include <net/route.h>
-#include <linux/bitops.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-
-#define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
-                        (1 << NF_INET_POST_ROUTING) | \
-                        (1 << NF_INET_LOCAL_OUT) | \
-                        (1 << NF_INET_LOCAL_IN))
-
-static const struct xt_table nat_table = {
-       .name           = "nat",
-       .valid_hooks    = NAT_VALID_HOOKS,
-       .me             = THIS_MODULE,
-       .af             = NFPROTO_IPV4,
-};
-
-/* Source NAT */
-static unsigned int
-ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_IN);
-
-       ct = nf_ct_get(skb, &ctinfo);
-
-       /* Connection must be valid and new. */
-       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-                           ctinfo == IP_CT_RELATED_REPLY));
-       NF_CT_ASSERT(par->out != NULL);
-
-       return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC);
-}
-
-static unsigned int
-ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_OUT);
-
-       ct = nf_ct_get(skb, &ctinfo);
-
-       /* Connection must be valid and new. */
-       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
-       return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST);
-}
-
-static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       /* Must be a valid range */
-       if (mr->rangesize != 1) {
-               pr_info("SNAT: multiple ranges no longer supported\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       /* Must be a valid range */
-       if (mr->rangesize != 1) {
-               pr_info("DNAT: multiple ranges no longer supported\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static unsigned int
-alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
-{
-       /* Force range to this IP; let proto decide mapping for
-          per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
-       */
-       struct nf_nat_ipv4_range range;
-
-       range.flags = 0;
-       pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
-                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
-                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
-                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
-
-       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
-}
-
-int nf_nat_rule_find(struct sk_buff *skb,
-                    unsigned int hooknum,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    struct nf_conn *ct)
-{
-       struct net *net = nf_ct_net(ct);
-       int ret;
-
-       ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
-
-       if (ret == NF_ACCEPT) {
-               if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
-                       /* NUL mapping */
-                       ret = alloc_null_binding(ct, hooknum);
-       }
-       return ret;
-}
-
-static struct xt_target ipt_snat_reg __read_mostly = {
-       .name           = "SNAT",
-       .target         = ipt_snat_target,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
-       .checkentry     = ipt_snat_checkentry,
-       .family         = AF_INET,
-};
-
-static struct xt_target ipt_dnat_reg __read_mostly = {
-       .name           = "DNAT",
-       .target         = ipt_dnat_target,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
-       .checkentry     = ipt_dnat_checkentry,
-       .family         = AF_INET,
-};
-
-static int __net_init nf_nat_rule_net_init(struct net *net)
-{
-       struct ipt_replace *repl;
-
-       repl = ipt_alloc_initial_table(&nat_table);
-       if (repl == NULL)
-               return -ENOMEM;
-       net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
-       kfree(repl);
-       if (IS_ERR(net->ipv4.nat_table))
-               return PTR_ERR(net->ipv4.nat_table);
-       return 0;
-}
-
-static void __net_exit nf_nat_rule_net_exit(struct net *net)
-{
-       ipt_unregister_table(net, net->ipv4.nat_table);
-}
-
-static struct pernet_operations nf_nat_rule_net_ops = {
-       .init = nf_nat_rule_net_init,
-       .exit = nf_nat_rule_net_exit,
-};
-
-int __init nf_nat_rule_init(void)
-{
-       int ret;
-
-       ret = register_pernet_subsys(&nf_nat_rule_net_ops);
-       if (ret != 0)
-               goto out;
-       ret = xt_register_target(&ipt_snat_reg);
-       if (ret != 0)
-               goto unregister_table;
-
-       ret = xt_register_target(&ipt_dnat_reg);
-       if (ret != 0)
-               goto unregister_snat;
-
-       return ret;
-
- unregister_snat:
-       xt_unregister_target(&ipt_snat_reg);
- unregister_table:
-       unregister_pernet_subsys(&nf_nat_rule_net_ops);
- out:
-       return ret;
-}
-
-void nf_nat_rule_cleanup(void)
-{
-       xt_unregister_target(&ipt_dnat_reg);
-       xt_unregister_target(&ipt_snat_reg);
-       unregister_pernet_subsys(&nf_nat_rule_net_ops);
-}
index 957acd12250bd1ee078ef0fbc6831225448559ee..8de53e1ddd544b594909b9d15c96b2cad85ffa3d 100644 (file)
@@ -263,6 +263,10 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
        SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
        SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
+       SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
+       SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
+       SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
+       SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
        SNMP_MIB_SENTINEL
 };
 
index fd9af60397b590dd817e02a52a33fdf99bb24c8e..ff622069fcefbe5ac2248440c2059b5a16d524e0 100644 (file)
@@ -1111,10 +1111,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
        const struct rtable *rt = (const struct rtable *) dst;
        unsigned int mtu = rt->rt_pmtu;
 
-       if (mtu && time_after_eq(jiffies, rt->dst.expires))
-               mtu = 0;
-
-       if (!mtu)
+       if (!mtu || time_after_eq(jiffies, rt->dst.expires))
                mtu = dst_metric_raw(dst, RTAX_MTU);
 
        if (mtu && rt_is_output_route(rt))
@@ -1566,11 +1563,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        if (ipv4_is_zeronet(daddr))
                goto martian_destination;
 
-       if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
-               if (ipv4_is_loopback(daddr))
+       /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
+        * and call it once if daddr or/and saddr are loopback addresses
+        */
+       if (ipv4_is_loopback(daddr)) {
+               if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_destination;
-
-               if (ipv4_is_loopback(saddr))
+       } else if (ipv4_is_loopback(saddr)) {
+               if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_source;
        }
 
@@ -1595,7 +1595,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        if (res.type == RTN_LOCAL) {
                err = fib_validate_source(skb, saddr, daddr, tos,
-                                         net->loopback_dev->ifindex,
+                                         LOOPBACK_IFINDEX,
                                          dev, in_dev, &itag);
                if (err < 0)
                        goto martian_source_keep_err;
@@ -1871,7 +1871,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 
        orig_oif = fl4->flowi4_oif;
 
-       fl4->flowi4_iif = net->loopback_dev->ifindex;
+       fl4->flowi4_iif = LOOPBACK_IFINDEX;
        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -1960,7 +1960,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                if (!fl4->daddr)
                        fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
                dev_out = net->loopback_dev;
-               fl4->flowi4_oif = net->loopback_dev->ifindex;
+               fl4->flowi4_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                flags |= RTCF_LOCAL;
                goto make_route;
@@ -2131,7 +2131,7 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
 EXPORT_SYMBOL_GPL(ip_route_output_flow);
 
 static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
-                       struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
+                       struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
                        u32 seq, int event, int nowait, unsigned int flags)
 {
        struct rtable *rt = skb_rtable(skb);
@@ -2141,7 +2141,7 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        u32 error;
        u32 metrics[RTAX_MAX];
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2301,12 +2301,12 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
                rt->rt_flags |= RTCF_NOTIFY;
 
        err = rt_fill_info(net, dst, src, &fl4, skb,
-                          NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+                          NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                           RTM_NEWROUTE, 0, 0);
        if (err <= 0)
                goto errout_free;
 
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 errout:
        return err;
 
index 650e1528e1e669a7828ab94bb4a5fa4bf65c1186..ba48e799b031b3a45c902dff2f8690a8ce2627fa 100644 (file)
@@ -319,6 +319,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        ireq->tstamp_ok         = tcp_opt.saw_tstamp;
        req->ts_recent          = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
        treq->snt_synack        = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
+       treq->listener          = NULL;
 
        /* We throwed the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
index 3e78c79b5586de71059b9e14efdb0fb45146b217..9205e492dc9d8a36b05f18ccedcdf4704867c986 100644 (file)
@@ -232,6 +232,45 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
        return 0;
 }
 
+int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
+                         size_t *lenp, loff_t *ppos)
+{
+       ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+       struct tcp_fastopen_context *ctxt;
+       int ret;
+       u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+
+       tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+       if (!tbl.data)
+               return -ENOMEM;
+
+       rcu_read_lock();
+       ctxt = rcu_dereference(tcp_fastopen_ctx);
+       if (ctxt)
+               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+       rcu_read_unlock();
+
+       snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+               user_key[0], user_key[1], user_key[2], user_key[3]);
+       ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+
+       if (write && ret == 0) {
+               if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
+                          user_key + 2, user_key + 3) != 4) {
+                       ret = -EINVAL;
+                       goto bad_key;
+               }
+               tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+       }
+
+bad_key:
+       pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+              user_key[0], user_key[1], user_key[2], user_key[3],
+              (char *)tbl.data, ret);
+       kfree(tbl.data);
+       return ret;
+}
+
 static struct ctl_table ipv4_table[] = {
        {
                .procname       = "tcp_timestamps",
@@ -385,6 +424,12 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_fastopen_key",
+               .mode           = 0600,
+               .maxlen         = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
+               .proc_handler   = proc_tcp_fastopen_key,
+       },
        {
                .procname       = "tcp_tw_recycle",
                .data           = &tcp_death_row.sysctl_tw_recycle,
index 5f64193418216393448ec9d8e839f83e85636716..f32c02e2a54346cf4e120d39e17d4d1b5e966189 100644 (file)
@@ -486,8 +486,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 
-       /* Connected? */
-       if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+       /* Connected or passive Fast Open socket? */
+       if (sk->sk_state != TCP_SYN_SENT &&
+           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
                if (tp->urg_seq == tp->copied_seq &&
@@ -840,10 +841,15 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 
-       /* Wait for a connection to finish. */
-       if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+       /* Wait for a connection to finish. One exception is TCP Fast Open
+        * (passive side) where data is allowed to be sent before a connection
+        * is fully established.
+        */
+       if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+           !tcp_passive_fastopen(sk)) {
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;
+       }
 
        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
@@ -1042,10 +1048,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 
-       /* Wait for a connection to finish. */
-       if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+       /* Wait for a connection to finish. One exception is TCP Fast Open
+        * (passive side) where data is allowed to be sent before a connection
+        * is fully established.
+        */
+       if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+           !tcp_passive_fastopen(sk)) {
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto do_error;
+       }
 
        if (unlikely(tp->repair)) {
                if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -1139,78 +1150,43 @@ new_segment:
                                if (err)
                                        goto do_fault;
                        } else {
-                               bool merge = false;
+                               bool merge = true;
                                int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = sk->sk_sndmsg_page;
-                               int off;
-
-                               if (page && page_count(page) == 1)
-                                       sk->sk_sndmsg_off = 0;
-
-                               off = sk->sk_sndmsg_off;
-
-                               if (skb_can_coalesce(skb, i, page, off) &&
-                                   off != PAGE_SIZE) {
-                                       /* We can extend the last page
-                                        * fragment. */
-                                       merge = true;
-                               } else if (i == MAX_SKB_FRAGS || !sg) {
-                                       /* Need to add new fragment and cannot
-                                        * do this because interface is non-SG,
-                                        * or because all the page slots are
-                                        * busy. */
-                                       tcp_mark_push(tp, skb);
-                                       goto new_segment;
-                               } else if (page) {
-                                       if (off == PAGE_SIZE) {
-                                               put_page(page);
-                                               sk->sk_sndmsg_page = page = NULL;
-                                               off = 0;
+                               struct page_frag *pfrag = sk_page_frag(sk);
+
+                               if (!sk_page_frag_refill(sk, pfrag))
+                                       goto wait_for_memory;
+
+                               if (!skb_can_coalesce(skb, i, pfrag->page,
+                                                     pfrag->offset)) {
+                                       if (i == MAX_SKB_FRAGS || !sg) {
+                                               tcp_mark_push(tp, skb);
+                                               goto new_segment;
                                        }
-                               } else
-                                       off = 0;
+                                       merge = false;
+                               }
 
-                               if (copy > PAGE_SIZE - off)
-                                       copy = PAGE_SIZE - off;
+                               copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
                                if (!sk_wmem_schedule(sk, copy))
                                        goto wait_for_memory;
 
-                               if (!page) {
-                                       /* Allocate new cache page. */
-                                       if (!(page = sk_stream_alloc_page(sk)))
-                                               goto wait_for_memory;
-                               }
-
-                               /* Time to copy data. We are close to
-                                * the end! */
                                err = skb_copy_to_page_nocache(sk, from, skb,
-                                                              page, off, copy);
-                               if (err) {
-                                       /* If this page was new, give it to the
-                                        * socket so it does not get leaked.
-                                        */
-                                       if (!sk->sk_sndmsg_page) {
-                                               sk->sk_sndmsg_page = page;
-                                               sk->sk_sndmsg_off = 0;
-                                       }
+                                                              pfrag->page,
+                                                              pfrag->offset,
+                                                              copy);
+                               if (err)
                                        goto do_error;
-                               }
 
                                /* Update the skb. */
                                if (merge) {
                                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                                } else {
-                                       skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (sk->sk_sndmsg_page) {
-                                               get_page(page);
-                                       } else if (off + copy < PAGE_SIZE) {
-                                               get_page(page);
-                                               sk->sk_sndmsg_page = page;
-                                       }
+                                       skb_fill_page_desc(skb, i, pfrag->page,
+                                                          pfrag->offset, copy);
+                                       get_page(pfrag->page);
                                }
-
-                               sk->sk_sndmsg_off = off + copy;
+                               pfrag->offset += copy;
                        }
 
                        if (!copied)
@@ -2150,6 +2126,10 @@ void tcp_close(struct sock *sk, long timeout)
                 * they look as CLOSING or LAST_ACK for Linux)
                 * Probably, I missed some more holelets.
                 *                                              --ANK
+                * XXX (TFO) - To start off we don't support SYN+ACK+FIN
+                * in a single packet! (May consider it later but will
+                * probably need API support or TCP_CORK SYN-ACK until
+                * data is written and socket is closed.)
                 */
                tcp_send_fin(sk);
        }
@@ -2221,8 +2201,16 @@ adjudge_to_death:
                }
        }
 
-       if (sk->sk_state == TCP_CLOSE)
+       if (sk->sk_state == TCP_CLOSE) {
+               struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+               /* We could get here with a non-NULL req if the socket is
+                * aborted (e.g., closed with unread data) before 3WHS
+                * finishes.
+                */
+               if (req != NULL)
+                       reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
+       }
        /* Otherwise, socket is reprieved until protocol close. */
 
 out:
@@ -2308,6 +2296,13 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
+void tcp_sock_destruct(struct sock *sk)
+{
+       inet_sock_destruct(sk);
+
+       kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
+}
+
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
        return capable(CAP_NET_ADMIN) &&
@@ -2701,6 +2696,14 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                else
                        icsk->icsk_user_timeout = msecs_to_jiffies(val);
                break;
+
+       case TCP_FASTOPEN:
+               if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+                   TCPF_LISTEN)))
+                       err = fastopen_init_queue(sk, val);
+               else
+                       err = -EINVAL;
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
@@ -3514,11 +3517,15 @@ EXPORT_SYMBOL(tcp_cookie_generator);
 
 void tcp_done(struct sock *sk)
 {
+       struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
+       if (req != NULL)
+               reqsk_fastopen_remove(sk, req, false);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
 
index a7f729c409d78bddb6ff4a1cc5a63fbdc69db581..8f7ef0ad80e5b6b062b7b40f3049634e63b93b40 100644 (file)
@@ -1,10 +1,91 @@
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/tcp.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <net/inetpeer.h>
+#include <net/tcp.h>
 
-int sysctl_tcp_fastopen;
+int sysctl_tcp_fastopen __read_mostly;
+
+struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+
+static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
+
+static void tcp_fastopen_ctx_free(struct rcu_head *head)
+{
+       struct tcp_fastopen_context *ctx =
+           container_of(head, struct tcp_fastopen_context, rcu);
+       crypto_free_cipher(ctx->tfm);
+       kfree(ctx);
+}
+
+int tcp_fastopen_reset_cipher(void *key, unsigned int len)
+{
+       int err;
+       struct tcp_fastopen_context *ctx, *octx;
+
+       ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+
+       if (IS_ERR(ctx->tfm)) {
+               err = PTR_ERR(ctx->tfm);
+error:         kfree(ctx);
+               pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
+               return err;
+       }
+       err = crypto_cipher_setkey(ctx->tfm, key, len);
+       if (err) {
+               pr_err("TCP: TFO cipher key error: %d\n", err);
+               crypto_free_cipher(ctx->tfm);
+               goto error;
+       }
+       memcpy(ctx->key, key, len);
+
+       spin_lock(&tcp_fastopen_ctx_lock);
+
+       octx = rcu_dereference_protected(tcp_fastopen_ctx,
+                               lockdep_is_held(&tcp_fastopen_ctx_lock));
+       rcu_assign_pointer(tcp_fastopen_ctx, ctx);
+       spin_unlock(&tcp_fastopen_ctx_lock);
+
+       if (octx)
+               call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
+       return err;
+}
+
+/* Computes the fastopen cookie for the peer.
+ * The peer address is 128 bits long (pad with zeros for IPv4).
+ *
+ * The caller must check foc->len to determine if a valid cookie
+ * has been generated successfully.
+ */
+void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+{
+       __be32 peer_addr[4] = { addr, 0, 0, 0 };
+       struct tcp_fastopen_context *ctx;
+
+       rcu_read_lock();
+       ctx = rcu_dereference(tcp_fastopen_ctx);
+       if (ctx) {
+               crypto_cipher_encrypt_one(ctx->tfm,
+                                         foc->val,
+                                         (__u8 *)peer_addr);
+               foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+       }
+       rcu_read_unlock();
+}
 
 static int __init tcp_fastopen_init(void)
 {
+       __u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+       get_random_bytes(key, sizeof(key));
+       tcp_fastopen_reset_cipher(key, sizeof(key));
        return 0;
 }
 
index d377f4854cb853c454541062fb6f8438608d470c..432c36649db3dd8d579ddb05ae886dd3251dcb66 100644 (file)
@@ -237,7 +237,11 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
                        tcp_enter_quickack_mode((struct sock *)tp);
                break;
        case INET_ECN_CE:
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+                       /* Better not delay acks, sender can have a very low cwnd */
+                       tcp_enter_quickack_mode((struct sock *)tp);
+                       tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               }
                /* fallinto */
        default:
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -374,7 +378,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 /* 4. Try to fixup all. It is made immediately after connection enters
  *    established state.
  */
-static void tcp_init_buffer_space(struct sock *sk)
+void tcp_init_buffer_space(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;
@@ -739,29 +743,6 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
-/* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-
-       tp->prior_ssthresh = 0;
-       tp->bytes_acked = 0;
-       if (icsk->icsk_ca_state < TCP_CA_CWR) {
-               tp->undo_marker = 0;
-               if (set_ssthresh)
-                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-               tp->snd_cwnd = min(tp->snd_cwnd,
-                                  tcp_packets_in_flight(tp) + 1U);
-               tp->snd_cwnd_cnt = 0;
-               tp->high_seq = tp->snd_nxt;
-               tp->snd_cwnd_stamp = tcp_time_stamp;
-               TCP_ECN_queue_cwr(tp);
-
-               tcp_set_ca_state(sk, TCP_CA_CWR);
-       }
-}
-
 /*
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
@@ -2489,35 +2470,6 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-/* Lower bound on congestion window is slow start threshold
- * unless congestion avoidance choice decides to overide it.
- */
-static inline u32 tcp_cwnd_min(const struct sock *sk)
-{
-       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
-
-       return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
-}
-
-/* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk, int flag)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int decr = tp->snd_cwnd_cnt + 1;
-
-       if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
-           (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
-               tp->snd_cwnd_cnt = decr & 1;
-               decr >>= 1;
-
-               if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
-                       tp->snd_cwnd -= decr;
-
-               tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
-               tp->snd_cwnd_stamp = tcp_time_stamp;
-       }
-}
-
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
@@ -2719,24 +2671,80 @@ static bool tcp_try_undo_loss(struct sock *sk)
        return false;
 }
 
-static inline void tcp_complete_cwr(struct sock *sk)
+/* The cwnd reduction in CWR and Recovery use the PRR algorithm
+ * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ *   1) If the packets in flight is larger than ssthresh, PRR spreads the
+ *     cwnd reductions across a full RTT.
+ *   2) If packets in flight is lower than ssthresh (such as due to excess
+ *     losses and/or application stalls), do not perform any further cwnd
+ *     reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* Do not moderate cwnd if it's already undone in cwr or recovery. */
-       if (tp->undo_marker) {
-               if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
-                       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-                       tp->snd_cwnd_stamp = tcp_time_stamp;
-               } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
-                       /* PRR algorithm. */
-                       tp->snd_cwnd = tp->snd_ssthresh;
-                       tp->snd_cwnd_stamp = tcp_time_stamp;
-               }
+       tp->high_seq = tp->snd_nxt;
+       tp->bytes_acked = 0;
+       tp->snd_cwnd_cnt = 0;
+       tp->prior_cwnd = tp->snd_cwnd;
+       tp->prr_delivered = 0;
+       tp->prr_out = 0;
+       if (set_ssthresh)
+               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+       TCP_ECN_queue_cwr(tp);
+}
+
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+                              int fast_rexmit)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int sndcnt = 0;
+       int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+       tp->prr_delivered += newly_acked_sacked;
+       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+               u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+                              tp->prior_cwnd - 1;
+               sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+       } else {
+               sndcnt = min_t(int, delta,
+                              max_t(int, tp->prr_delivered - tp->prr_out,
+                                    newly_acked_sacked) + 1);
+       }
+
+       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+       tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
+static inline void tcp_end_cwnd_reduction(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+       if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
+           (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+               tp->snd_cwnd = tp->snd_ssthresh;
+               tp->snd_cwnd_stamp = tcp_time_stamp;
        }
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tp->prior_ssthresh = 0;
+       tp->bytes_acked = 0;
+       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+               tp->undo_marker = 0;
+               tcp_init_cwnd_reduction(sk, set_ssthresh);
+               tcp_set_ca_state(sk, TCP_CA_CWR);
+       }
+}
+
 static void tcp_try_keep_open(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2751,7 +2759,7 @@ static void tcp_try_keep_open(struct sock *sk)
        }
 }
 
-static void tcp_try_to_open(struct sock *sk, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2768,7 +2776,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
                        tcp_moderate_cwnd(tp);
        } else {
-               tcp_cwnd_down(sk, flag);
+               tcp_cwnd_reduction(sk, newly_acked_sacked, 0);
        }
 }
 
@@ -2850,38 +2858,6 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
-/* This function implements the PRR algorithm, specifcally the PRR-SSRB
- * (proportional rate reduction with slow start reduction bound) as described in
- * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
- * It computes the number of packets to send (sndcnt) based on packets newly
- * delivered:
- *   1) If the packets in flight is larger than ssthresh, PRR spreads the
- *     cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *     losses and/or application stalls), do not perform any further cwnd
- *     reductions, but instead slow start up to ssthresh.
- */
-static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
-                                       int fast_rexmit, int flag)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int sndcnt = 0;
-       int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-
-       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
-               u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
-                              tp->prior_cwnd - 1;
-               sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-       } else {
-               sndcnt = min_t(int, delta,
-                              max_t(int, tp->prr_delivered - tp->prr_out,
-                                    newly_acked_sacked) + 1);
-       }
-
-       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
-       tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
-}
-
 static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2894,7 +2870,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
        NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
-       tp->high_seq = tp->snd_nxt;
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = tp->retrans_out;
@@ -2902,15 +2877,8 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
-               TCP_ECN_queue_cwr(tp);
+               tcp_init_cwnd_reduction(sk, true);
        }
-
-       tp->bytes_acked = 0;
-       tp->snd_cwnd_cnt = 0;
-       tp->prior_cwnd = tp->snd_cwnd;
-       tp->prr_delivered = 0;
-       tp->prr_out = 0;
        tcp_set_ca_state(sk, TCP_CA_Recovery);
 }
 
@@ -2970,7 +2938,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        /* CWR is to be held something *above* high_seq
                         * is ACKed for CWR bit to reach receiver. */
                        if (tp->snd_una != tp->high_seq) {
-                               tcp_complete_cwr(sk);
+                               tcp_end_cwnd_reduction(sk);
                                tcp_set_ca_state(sk, TCP_CA_Open);
                        }
                        break;
@@ -2980,7 +2948,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_reset_reno_sack(tp);
                        if (tcp_try_undo_recovery(sk))
                                return;
-                       tcp_complete_cwr(sk);
+                       tcp_end_cwnd_reduction(sk);
                        break;
                }
        }
@@ -3021,7 +2989,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk, flag)) {
-                       tcp_try_to_open(sk, flag);
+                       tcp_try_to_open(sk, flag, newly_acked_sacked);
                        return;
                }
 
@@ -3043,8 +3011,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 
        if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
                tcp_update_scoreboard(sk, fast_rexmit);
-       tp->prr_delivered += newly_acked_sacked;
-       tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
+       tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit);
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3123,6 +3090,12 @@ void tcp_rearm_rto(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
+       /* If the retrans timer is currently being used by Fast Open
+        * for SYN-ACK retrans purpose, stay put.
+        */
+       if (tp->fastopen_rsk)
+               return;
+
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
@@ -3384,7 +3357,7 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
-               !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
+               !tcp_in_cwnd_reduction(sk);
 }
 
 /* Check that window update is acceptable.
@@ -3452,9 +3425,9 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 }
 
 /* A conservative spurious RTO response algorithm: reduce cwnd using
- * rate halving and continue in congestion avoidance.
+ * PRR and continue in congestion avoidance.
  */
-static void tcp_ratehalving_spur_to_response(struct sock *sk)
+static void tcp_cwr_spur_to_response(struct sock *sk)
 {
        tcp_enter_cwr(sk, 0);
 }
@@ -3462,7 +3435,7 @@ static void tcp_ratehalving_spur_to_response(struct sock *sk)
 static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 {
        if (flag & FLAG_ECE)
-               tcp_ratehalving_spur_to_response(sk);
+               tcp_cwr_spur_to_response(sk);
        else
                tcp_undo_cwr(sk, true);
 }
@@ -3569,7 +3542,7 @@ static bool tcp_process_frto(struct sock *sk, int flag)
                        tcp_conservative_spur_to_response(tp);
                        break;
                default:
-                       tcp_ratehalving_spur_to_response(sk);
+                       tcp_cwr_spur_to_response(sk);
                        break;
                }
                tp->frto_counter = 0;
@@ -4034,7 +4007,7 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
 }
 
 /* When we get a reset we do this. */
-static void tcp_reset(struct sock *sk)
+void tcp_reset(struct sock *sk)
 {
        /* We want the right error as BSD sees it (and indeed as we do). */
        switch (sk->sk_state) {
@@ -5740,7 +5713,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                TCP_ECN_rcv_synack(tp, th);
 
-               tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
                /* Ok.. it's good. Set up sequence numbers and
@@ -5753,7 +5726,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5891,7 +5863,9 @@ discard:
                tcp_send_synack(sk);
 #if 0
                /* Note, we could accept data and URG from this segment.
-                * There are no obstacles to make this.
+                * There are no obstacles to make this (except that we must
+                * either change tcp_recvmsg() to prevent it from returning data
+                * before 3WHS completes per RFC793, or employ TCP Fast Open).
                 *
                 * However, if we ignore data in ACKless segments sometimes,
                 * we have no reasons to accept it sometimes.
@@ -5931,6 +5905,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock *req;
        int queued = 0;
 
        tp->rx_opt.saw_tstamp = 0;
@@ -5986,6 +5961,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                return 0;
        }
 
+       req = tp->fastopen_rsk;
+       if (req != NULL) {
+               BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+                   sk->sk_state != TCP_FIN_WAIT1);
+
+               if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
+                       goto discard;
+       }
        if (!tcp_validate_incoming(sk, skb, th, 0))
                return 0;
 
@@ -5996,7 +5979,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
-                               tp->copied_seq = tp->rcv_nxt;
+                               /* Once we leave TCP_SYN_RECV, we no longer
+                                * need req so release it.
+                                */
+                               if (req) {
+                                       tcp_synack_rtt_meas(sk, req);
+                                       tp->total_retrans = req->retrans;
+
+                                       reqsk_fastopen_remove(sk, req, false);
+                               } else {
+                                       /* Make sure socket is routed, for
+                                        * correct metrics.
+                                        */
+                                       icsk->icsk_af_ops->rebuild_header(sk);
+                                       tcp_init_congestion_control(sk);
+
+                                       tcp_mtup_init(sk);
+                                       tcp_init_buffer_space(sk);
+                                       tp->copied_seq = tp->rcv_nxt;
+                               }
                                smp_mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->sk_state_change(sk);
@@ -6018,23 +6019,27 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
 
-                               /* Make sure socket is routed, for
-                                * correct metrics.
-                                */
-                               icsk->icsk_af_ops->rebuild_header(sk);
-
-                               tcp_init_metrics(sk);
-
-                               tcp_init_congestion_control(sk);
+                               if (req) {
+                                       /* Re-arm the timer because data may
+                                        * have been sent out. This is similar
+                                        * to the regular data transmission case
+                                        * when new data has just been ack'ed.
+                                        *
+                                        * (TFO) - we could try to be more
+                                        * aggressive and retransmitting any data
+                                        * sooner based on when they were sent
+                                        * out.
+                                        */
+                                       tcp_rearm_rto(sk);
+                               } else
+                                       tcp_init_metrics(sk);
 
                                /* Prevent spurious tcp_cwnd_restart() on
                                 * first data packet.
                                 */
                                tp->lsndtime = tcp_time_stamp;
 
-                               tcp_mtup_init(sk);
                                tcp_initialize_rcv_mss(sk);
-                               tcp_init_buffer_space(sk);
                                tcp_fast_path_on(tp);
                        } else {
                                return 1;
@@ -6042,6 +6047,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        break;
 
                case TCP_FIN_WAIT1:
+                       /* If we enter the TCP_FIN_WAIT1 state and we are a
+                        * Fast Open socket and this is the first acceptable
+                        * ACK we have received, this would have acknowledged
+                        * our SYNACK so stop the SYNACK timer.
+                        */
+                       if (acceptable && req != NULL) {
+                               /* We no longer need the request sock. */
+                               reqsk_fastopen_remove(sk, req, false);
+                               tcp_rearm_rto(sk);
+                       }
                        if (tp->snd_una == tp->write_seq) {
                                struct dst_entry *dst;
 
index be23a0b7b89e17e23f7b3a92d424ff8159c03721..75735c9a6a9df2bf4026266c8c30d92bd474092c 100644 (file)
@@ -352,6 +352,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
+       struct request_sock *req;
        __u32 seq;
        __u32 remaining;
        int err;
@@ -394,9 +395,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
+       req = tp->fastopen_rsk;
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
-           !between(seq, tp->snd_una, tp->snd_nxt)) {
+           !between(seq, tp->snd_una, tp->snd_nxt) &&
+           (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
+               /* For a Fast Open socket, allow seq to be snt_isn. */
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
@@ -435,6 +439,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                    !icsk->icsk_backoff)
                        break;
 
+               /* XXX (TFO) - revisit the following logic for TFO */
+
                if (sock_owned_by_user(sk))
                        break;
 
@@ -466,6 +472,14 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                goto out;
        }
 
+       /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
+        * than following the TCP_SYN_RECV case and closing the socket,
+        * we ignore the ICMP error and keep trying like a fully established
+        * socket. Is this the right thing to do?
+        */
+       if (req && req->sk == NULL)
+               goto out;
+
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
@@ -498,7 +512,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
-                              It can f.e. if SYNs crossed.
+                              It can f.e. if SYNs crossed,
+                              or Fast Open.
                             */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
@@ -809,8 +824,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
-       tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
-                       tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+       /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+        * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+        */
+       tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+                       tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -839,7 +858,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;
 
-       skb = tcp_make_synack(sk, dst, req, rvp);
+       skb = tcp_make_synack(sk, dst, req, rvp, NULL);
 
        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
@@ -849,6 +868,8 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                            ireq->rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
+               if (!tcp_rsk(req)->snt_synack && !err)
+                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
        return err;
@@ -904,8 +925,7 @@ EXPORT_SYMBOL(tcp_syn_flood_action);
 /*
  * Save and compile IPv4 options into the request_sock if needed.
  */
-static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
-                                                 struct sk_buff *skb)
+static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
 {
        const struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options_rcu *dopt = NULL;
@@ -1272,6 +1292,182 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 };
 #endif
 
+static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
+                              struct request_sock *req,
+                              struct tcp_fastopen_cookie *foc,
+                              struct tcp_fastopen_cookie *valid_foc)
+{
+       bool skip_cookie = false;
+       struct fastopen_queue *fastopenq;
+
+       if (likely(!fastopen_cookie_present(foc))) {
+               /* See include/net/tcp.h for the meaning of these knobs */
+               if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
+                   ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
+                   (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
+                       skip_cookie = true; /* no cookie to validate */
+               else
+                       return false;
+       }
+       fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+       /* A FO option is present; bump the counter. */
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
+
+       /* Make sure the listener has enabled fastopen, and we don't
+        * exceed the max # of pending TFO requests allowed before trying
+        * to validate the cookie in order to avoid burning CPU cycles
+        * unnecessarily.
+        *
+        * XXX (TFO) - The implication of checking the max_qlen before
+        * processing a cookie request is that clients can't differentiate
+        * between qlen overflow causing Fast Open to be disabled
+        * temporarily vs a server not supporting Fast Open at all.
+        */
+       if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
+           fastopenq == NULL || fastopenq->max_qlen == 0)
+               return false;
+
+       if (fastopenq->qlen >= fastopenq->max_qlen) {
+               struct request_sock *req1;
+               spin_lock(&fastopenq->lock);
+               req1 = fastopenq->rskq_rst_head;
+               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+                       spin_unlock(&fastopenq->lock);
+                       NET_INC_STATS_BH(sock_net(sk),
+                           LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+                       /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
+                       foc->len = -1;
+                       return false;
+               }
+               fastopenq->rskq_rst_head = req1->dl_next;
+               fastopenq->qlen--;
+               spin_unlock(&fastopenq->lock);
+               reqsk_free(req1);
+       }
+       if (skip_cookie) {
+               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+               return true;
+       }
+       if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
+               if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
+                       tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+                       if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
+                           memcmp(&foc->val[0], &valid_foc->val[0],
+                           TCP_FASTOPEN_COOKIE_SIZE) != 0)
+                               return false;
+                       valid_foc->len = -1;
+               }
+               /* Acknowledge the data received from the peer. */
+               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+               return true;
+       } else if (foc->len == 0) { /* Client requesting a cookie */
+               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+               NET_INC_STATS_BH(sock_net(sk),
+                   LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+       } else {
+               /* Client sent a cookie with wrong size. Treat it
+                * the same as invalid and return a valid one.
+                */
+               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+       }
+       return false;
+}
+
+static int tcp_v4_conn_req_fastopen(struct sock *sk,
+                                   struct sk_buff *skb,
+                                   struct sk_buff *skb_synack,
+                                   struct request_sock *req,
+                                   struct request_values *rvp)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+       const struct inet_request_sock *ireq = inet_rsk(req);
+       struct sock *child;
+       int err;
+
+       req->retrans = 0;
+       req->sk = NULL;
+
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+       if (child == NULL) {
+               NET_INC_STATS_BH(sock_net(sk),
+                                LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+               kfree_skb(skb_synack);
+               return -1;
+       }
+       err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+                                   ireq->rmt_addr, ireq->opt);
+       err = net_xmit_eval(err);
+       if (!err)
+               tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       /* XXX (TFO) - is it ok to ignore error and continue? */
+
+       spin_lock(&queue->fastopenq->lock);
+       queue->fastopenq->qlen++;
+       spin_unlock(&queue->fastopenq->lock);
+
+       /* Initialize the child socket. Have to fix some values to take
+        * into account the child is a Fast Open socket and is created
+        * only out of the bits carried in the SYN packet.
+        */
+       tp = tcp_sk(child);
+
+       tp->fastopen_rsk = req;
+       /* Do a hold on the listener sk so that if the listener is being
+        * closed, the child that has been accepted can live on and still
+        * access listen_lock.
+        */
+       sock_hold(sk);
+       tcp_rsk(req)->listener = sk;
+
+       /* RFC1323: The window in SYN & SYN/ACK segments is never
+        * scaled. So correct it appropriately.
+        */
+       tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+
+       /* Activate the retrans timer so that SYNACK can be retransmitted.
+        * The request socket is not added to the SYN table of the parent
+        * because it's been added to the accept queue directly.
+        */
+       inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+           TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+       /* Add the child socket directly into the accept queue */
+       inet_csk_reqsk_queue_add(sk, req, child);
+
+       /* Now finish processing the fastopen child socket. */
+       inet_csk(child)->icsk_af_ops->rebuild_header(child);
+       tcp_init_congestion_control(child);
+       tcp_mtup_init(child);
+       tcp_init_buffer_space(child);
+       tcp_init_metrics(child);
+
+       /* Queue the data carried in the SYN packet. We need to first
+        * bump skb's refcnt because the caller will attempt to free it.
+        *
+        * XXX (TFO) - we honor a zero-payload TFO request for now.
+        * (Any reason not to?)
+        */
+       if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
+               /* Don't queue the skb if there is no payload in SYN.
+                * XXX (TFO) - How about SYN+FIN?
+                */
+               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       } else {
+               skb = skb_get(skb);
+               skb_dst_drop(skb);
+               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+               skb_set_owner_r(skb, child);
+               __skb_queue_tail(&child->sk_receive_queue, skb);
+               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       }
+       sk->sk_data_ready(sk, 0);
+       bh_unlock_sock(child);
+       sock_put(child);
+       WARN_ON(req->sk == NULL);
+       return 0;
+}
+
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_extend_values tmp_ext;
@@ -1285,6 +1481,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        bool want_cookie = false;
+       struct flowi4 fl4;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+       struct sk_buff *skb_synack;
+       int do_fastopen;
 
        /* Never answer to SYNs send to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1319,7 +1520,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
        tmp_opt.user_mss  = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
+       tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
+           want_cookie ? NULL : &foc);
 
        if (tmp_opt.cookie_plus > 0 &&
            tmp_opt.saw_tstamp &&
@@ -1365,7 +1567,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->loc_addr = daddr;
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
-       ireq->opt = tcp_v4_save_options(sk, skb);
+       ireq->opt = tcp_v4_save_options(skb);
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@ -1377,8 +1579,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                isn = cookie_v4_init_sequence(sk, skb, &req->mss);
                req->cookie_ts = tmp_opt.tstamp_ok;
        } else if (!isn) {
-               struct flowi4 fl4;
-
                /* VJ's idea. We save last timestamp seen
                 * from the destination in peer table, when entering
                 * state TIME-WAIT, and check against it before
@@ -1417,16 +1617,54 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                isn = tcp_v4_init_sequence(skb);
        }
        tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
-       if (tcp_v4_send_synack(sk, dst, req,
-                              (struct request_values *)&tmp_ext,
-                              skb_get_queue_mapping(skb),
-                              want_cookie) ||
-           want_cookie)
+       if (dst == NULL) {
+               dst = inet_csk_route_req(sk, &fl4, req);
+               if (dst == NULL)
+                       goto drop_and_free;
+       }
+       do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
+
+       /* We don't call tcp_v4_send_synack() directly because we need
+        * to make sure a child socket can be created successfully before
+        * sending back synack!
+        *
+        * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
+        * (or better yet, call tcp_send_synack() in the child context
+        * directly, but will have to fix bunch of other code first)
+        * after syn_recv_sock() except one will need to first fix the
+        * latter to remove its dependency on the current implementation
+        * of tcp_v4_send_synack()->tcp_select_initial_window().
+        */
+       skb_synack = tcp_make_synack(sk, dst, req,
+           (struct request_values *)&tmp_ext,
+           fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
+
+       if (skb_synack) {
+               __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+               skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
+       } else
+               goto drop_and_free;
+
+       if (likely(!do_fastopen)) {
+               int err;
+               err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+                    ireq->rmt_addr, ireq->opt);
+               err = net_xmit_eval(err);
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->snt_synack = tcp_time_stamp;
+               tcp_rsk(req)->listener = NULL;
+               /* Add the request_sock to the SYN table */
+               inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+               if (fastopen_cookie_present(&foc) && foc.len != 0)
+                       NET_INC_STATS_BH(sock_net(sk),
+                           LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+       } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
+           (struct request_values *)&tmp_ext))
                goto drop_and_free;
 
-       inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;
 
 drop_and_release:
@@ -1500,9 +1738,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
        tcp_initialize_rcv_mss(newsk);
-       if (tcp_rsk(req)->snt_synack)
-               tcp_valid_rtt_meas(newsk,
-                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
+       tcp_synack_rtt_meas(newsk, req);
        newtp->total_retrans = req->retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1554,7 +1790,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                                                       iph->saddr, iph->daddr);
        if (req)
-               return tcp_check_req(sk, skb, req, prev);
+               return tcp_check_req(sk, skb, req, prev, false);
 
        nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
                        th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1963,20 +2199,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       /*
-        * If sendmsg cached page exists, toss it.
-        */
-       if (sk->sk_sndmsg_page) {
-               __free_page(sk->sk_sndmsg_page);
-               sk->sk_sndmsg_page = NULL;
-       }
-
        /* TCP Cookie Transactions */
        if (tp->cookie_values != NULL) {
                kref_put(&tp->cookie_values->kref,
                         tcp_cookie_values_release);
                tp->cookie_values = NULL;
        }
+       BUG_ON(tp->fastopen_rsk != NULL);
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
@@ -2396,7 +2625,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
                         struct seq_file *f, int i, kuid_t uid, int *len)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
-       int ttd = req->expires - jiffies;
+       long delta = req->expires - jiffies;
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
@@ -2408,7 +2637,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
-               jiffies_to_clock_t(ttd),
+               jiffies_delta_to_clock_t(delta),
                req->retrans,
                from_kuid_munged(seq_user_ns(f), uid),
                0,  /* non standard timer */
@@ -2425,6 +2654,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_sock *inet = inet_sk(sk);
+       struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
        __be32 dest = inet->inet_daddr;
        __be32 src = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
@@ -2459,7 +2689,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
-               jiffies_to_clock_t(timer_expires - jiffies),
+               jiffies_delta_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
@@ -2469,7 +2699,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
-               tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
+               sk->sk_state == TCP_LISTEN ?
+                   (fastopenq ? fastopenq->max_qlen : 0) :
+                   (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
                len);
 }
 
@@ -2478,10 +2710,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 {
        __be32 dest, src;
        __u16 destp, srcp;
-       int ttd = tw->tw_ttd - jiffies;
-
-       if (ttd < 0)
-               ttd = 0;
+       long delta = tw->tw_ttd - jiffies;
 
        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
@@ -2491,7 +2720,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
-               3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+               3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw, len);
 }
 
@@ -2574,6 +2803,8 @@ void tcp4_proc_exit(void)
 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
        const struct iphdr *iph = skb_gro_network_header(skb);
+       __wsum wsum;
+       __sum16 sum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
@@ -2582,11 +2813,22 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }
-
-               /* fall through */
-       case CHECKSUM_NONE:
+flush:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
+
+       case CHECKSUM_NONE:
+               wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+                                         skb_gro_len(skb), IPPROTO_TCP, 0);
+               sum = csum_fold(skb_checksum(skb,
+                                            skb_gro_offset(skb),
+                                            skb_gro_len(skb),
+                                            wsum));
+               if (sum)
+                       goto flush;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
        }
 
        return tcp_gro_receive(head, skb);
index 0abe67bb4d3a3adb0d9df820b1fe64b6ef9da591..4c752a6e0bcd91b0b932b483a2b9f988908c04a2 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/tcp.h>
 #include <linux/hash.h>
+#include <linux/tcp_metrics.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/net_namespace.h>
 #include <net/ipv6.h>
 #include <net/dst.h>
 #include <net/tcp.h>
+#include <net/genetlink.h>
 
 int sysctl_tcp_nometrics_save __read_mostly;
 
-enum tcp_metric_index {
-       TCP_METRIC_RTT,
-       TCP_METRIC_RTTVAR,
-       TCP_METRIC_SSTHRESH,
-       TCP_METRIC_CWND,
-       TCP_METRIC_REORDERING,
-
-       /* Always last.  */
-       TCP_METRIC_MAX,
-};
-
 struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
@@ -45,8 +36,10 @@ struct tcp_metrics_block {
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
-       u32                             tcpm_vals[TCP_METRIC_MAX];
+       u32                             tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;
+
+       struct rcu_head                 rcu_head;
 };
 
 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
@@ -690,6 +683,325 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
        rcu_read_unlock();
 }
 
+static struct genl_family tcp_metrics_nl_family = {
+       .id             = GENL_ID_GENERATE,
+       .hdrsize        = 0,
+       .name           = TCP_METRICS_GENL_NAME,
+       .version        = TCP_METRICS_GENL_VERSION,
+       .maxattr        = TCP_METRICS_ATTR_MAX,
+       .netnsok        = true,
+};
+
+static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
+       [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
+       [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
+                                           .len = sizeof(struct in6_addr), },
+       /* Following attributes are not received for GET/DEL,
+        * we keep them for reference
+        */
+#if 0
+       [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
+       [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
+       [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
+       [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
+       [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
+       [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
+       [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
+       [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
+                                           .len = TCP_FASTOPEN_COOKIE_MAX, },
+#endif
+};
+
+/* Add attributes, caller cancels its header on failure */
+static int tcp_metrics_fill_info(struct sk_buff *msg,
+                                struct tcp_metrics_block *tm)
+{
+       struct nlattr *nest;
+       int i;
+
+       switch (tm->tcpm_addr.family) {
+       case AF_INET:
+               if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
+                               tm->tcpm_addr.addr.a4) < 0)
+                       goto nla_put_failure;
+               break;
+       case AF_INET6:
+               if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
+                           tm->tcpm_addr.addr.a6) < 0)
+                       goto nla_put_failure;
+               break;
+       default:
+               return -EAFNOSUPPORT;
+       }
+
+       if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
+                         jiffies - tm->tcpm_stamp) < 0)
+               goto nla_put_failure;
+       if (tm->tcpm_ts_stamp) {
+               if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
+                               (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
+                       goto nla_put_failure;
+               if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
+                               tm->tcpm_ts) < 0)
+                       goto nla_put_failure;
+       }
+
+       {
+               int n = 0;
+
+               nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
+               if (!nest)
+                       goto nla_put_failure;
+               for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
+                       if (!tm->tcpm_vals[i])
+                               continue;
+                       if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
+                               goto nla_put_failure;
+                       n++;
+               }
+               if (n)
+                       nla_nest_end(msg, nest);
+               else
+                       nla_nest_cancel(msg, nest);
+       }
+
+       {
+               struct tcp_fastopen_metrics tfom_copy[1], *tfom;
+               unsigned int seq;
+
+               do {
+                       seq = read_seqbegin(&fastopen_seqlock);
+                       tfom_copy[0] = tm->tcpm_fastopen;
+               } while (read_seqretry(&fastopen_seqlock, seq));
+
+               tfom = tfom_copy;
+               if (tfom->mss &&
+                   nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
+                               tfom->mss) < 0)
+                       goto nla_put_failure;
+               if (tfom->syn_loss &&
+                   (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
+                               tfom->syn_loss) < 0 ||
+                    nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
+                               jiffies - tfom->last_syn_loss) < 0))
+                       goto nla_put_failure;
+               if (tfom->cookie.len > 0 &&
+                   nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
+                           tfom->cookie.len, tfom->cookie.val) < 0)
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int tcp_metrics_dump_info(struct sk_buff *skb,
+                                struct netlink_callback *cb,
+                                struct tcp_metrics_block *tm)
+{
+       void *hdr;
+
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                         &tcp_metrics_nl_family, NLM_F_MULTI,
+                         TCP_METRICS_CMD_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (tcp_metrics_fill_info(skb, tm) < 0)
+               goto nla_put_failure;
+
+       return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+       return -EMSGSIZE;
+}
+
+static int tcp_metrics_nl_dump(struct sk_buff *skb,
+                              struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
+       unsigned int row, s_row = cb->args[0];
+       int s_col = cb->args[1], col = s_col;
+
+       for (row = s_row; row < max_rows; row++, s_col = 0) {
+               struct tcp_metrics_block *tm;
+               struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;
+
+               rcu_read_lock();
+               for (col = 0, tm = rcu_dereference(hb->chain); tm;
+                    tm = rcu_dereference(tm->tcpm_next), col++) {
+                       if (col < s_col)
+                               continue;
+                       if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
+                               rcu_read_unlock();
+                               goto done;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+done:
+       cb->args[0] = row;
+       cb->args[1] = col;
+       return skb->len;
+}
+
+static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
+                        unsigned int *hash, int optional)
+{
+       struct nlattr *a;
+
+       a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
+       if (a) {
+               addr->family = AF_INET;
+               addr->addr.a4 = nla_get_be32(a);
+               *hash = (__force unsigned int) addr->addr.a4;
+               return 0;
+       }
+       a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
+       if (a) {
+               if (nla_len(a) != sizeof(sizeof(struct in6_addr)))
+                       return -EINVAL;
+               addr->family = AF_INET6;
+               memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
+               *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
+               return 0;
+       }
+       return optional ? 1 : -EAFNOSUPPORT;
+}
+
+static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct tcp_metrics_block *tm;
+       struct inetpeer_addr addr;
+       unsigned int hash;
+       struct sk_buff *msg;
+       struct net *net = genl_info_net(info);
+       void *reply;
+       int ret;
+
+       ret = parse_nl_addr(info, &addr, &hash, 0);
+       if (ret < 0)
+               return ret;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
+                                 info->genlhdr->cmd);
+       if (!reply)
+               goto nla_put_failure;
+
+       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       ret = -ESRCH;
+       rcu_read_lock();
+       for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+            tm = rcu_dereference(tm->tcpm_next)) {
+               if (addr_same(&tm->tcpm_addr, &addr)) {
+                       ret = tcp_metrics_fill_info(msg, tm);
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (ret < 0)
+               goto out_free;
+
+       genlmsg_end(msg, reply);
+       return genlmsg_reply(msg, info);
+
+nla_put_failure:
+       ret = -EMSGSIZE;
+
+out_free:
+       nlmsg_free(msg);
+       return ret;
+}
+
+#define deref_locked_genl(p)   \
+       rcu_dereference_protected(p, lockdep_genl_is_held() && \
+                                    lockdep_is_held(&tcp_metrics_lock))
+
+#define deref_genl(p)  rcu_dereference_protected(p, lockdep_genl_is_held())
+
+static int tcp_metrics_flush_all(struct net *net)
+{
+       unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
+       struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
+       struct tcp_metrics_block *tm;
+       unsigned int row;
+
+       for (row = 0; row < max_rows; row++, hb++) {
+               spin_lock_bh(&tcp_metrics_lock);
+               tm = deref_locked_genl(hb->chain);
+               if (tm)
+                       hb->chain = NULL;
+               spin_unlock_bh(&tcp_metrics_lock);
+               while (tm) {
+                       struct tcp_metrics_block *next;
+
+                       next = deref_genl(tm->tcpm_next);
+                       kfree_rcu(tm, rcu_head);
+                       tm = next;
+               }
+       }
+       return 0;
+}
+
+static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct tcpm_hash_bucket *hb;
+       struct tcp_metrics_block *tm;
+       struct tcp_metrics_block __rcu **pp;
+       struct inetpeer_addr addr;
+       unsigned int hash;
+       struct net *net = genl_info_net(info);
+       int ret;
+
+       ret = parse_nl_addr(info, &addr, &hash, 1);
+       if (ret < 0)
+               return ret;
+       if (ret > 0)
+               return tcp_metrics_flush_all(net);
+
+       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       hb = net->ipv4.tcp_metrics_hash + hash;
+       pp = &hb->chain;
+       spin_lock_bh(&tcp_metrics_lock);
+       for (tm = deref_locked_genl(*pp); tm;
+            pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
+               if (addr_same(&tm->tcpm_addr, &addr)) {
+                       *pp = tm->tcpm_next;
+                       break;
+               }
+       }
+       spin_unlock_bh(&tcp_metrics_lock);
+       if (!tm)
+               return -ESRCH;
+       kfree_rcu(tm, rcu_head);
+       return 0;
+}
+
+static struct genl_ops tcp_metrics_nl_ops[] = {
+       {
+               .cmd = TCP_METRICS_CMD_GET,
+               .doit = tcp_metrics_nl_cmd_get,
+               .dumpit = tcp_metrics_nl_dump,
+               .policy = tcp_metrics_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = TCP_METRICS_CMD_DEL,
+               .doit = tcp_metrics_nl_cmd_del,
+               .policy = tcp_metrics_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+};
+
 static unsigned int tcpmhash_entries;
 static int __init set_tcpmhash_entries(char *str)
 {
@@ -753,5 +1065,21 @@ static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
 
 void __init tcp_metrics_init(void)
 {
-       register_pernet_subsys(&tcp_net_metrics_ops);
+       int ret;
+
+       ret = register_pernet_subsys(&tcp_net_metrics_ops);
+       if (ret < 0)
+               goto cleanup;
+       ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
+                                           tcp_metrics_nl_ops,
+                                           ARRAY_SIZE(tcp_metrics_nl_ops));
+       if (ret < 0)
+               goto cleanup_subsys;
+       return;
+
+cleanup_subsys:
+       unregister_pernet_subsys(&tcp_net_metrics_ops);
+
+cleanup:
+       return;
 }
index 6ff7f10dce9d56c2f99f0cb13dab38f69eec4619..27536ba16c9da7b89d5385073c7ae4d4f9e27f3c 100644 (file)
@@ -85,6 +85,8 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
  * spinlock it. I do not want! Well, probability of misbehaviour
  * is ridiculously low and, seems, we could use some mb() tricks
  * to avoid misread sequence numbers, states etc.  --ANK
+ *
+ * We don't need to initialize tmp_opt.sack_ok as we don't use the results
  */
 enum tcp_tw_status
 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
@@ -507,6 +509,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
+               newtp->fastopen_rsk = NULL;
 
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
        }
@@ -515,13 +518,20 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 EXPORT_SYMBOL(tcp_create_openreq_child);
 
 /*
- *     Process an incoming packet for SYN_RECV sockets represented
- *     as a request_sock.
+ * Process an incoming packet for SYN_RECV sockets represented as a
+ * request_sock. Normally sk is the listener socket but for TFO it
+ * points to the child socket.
+ *
+ * XXX (TFO) - The current impl contains a special check for ack
+ * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
+ *
+ * We don't need to initialize tmp_opt.sack_ok as we don't use the results
  */
 
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
-                          struct request_sock **prev)
+                          struct request_sock **prev,
+                          bool fastopen)
 {
        struct tcp_options_received tmp_opt;
        const u8 *hash_location;
@@ -530,6 +540,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
 
+       BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
+
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
@@ -565,6 +577,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
+                *
+                * Note that even if there is new data in the SYN packet
+                * they will be thrown away too.
                 */
                req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
@@ -622,9 +637,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         *                  sent (the segment carries an unacceptable ACK) ...
         *                  a reset is sent."
         *
-        * Invalid ACK: reset will be sent by listening socket
+        * Invalid ACK: reset will be sent by listening socket.
+        * Note that the ACK validity check for a Fast Open socket is done
+        * elsewhere and is checked directly against the child socket rather
+        * than req because user data may have been sent out.
         */
-       if ((flg & TCP_FLAG_ACK) &&
+       if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
                return sk;
@@ -637,7 +655,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        /* RFC793: "first check sequence number". */
 
        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                         tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
+                                         tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_ack(sk, skb, req);
@@ -648,7 +666,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
        /* In sequence, PAWS is OK. */
 
-       if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
+       if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;
 
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
@@ -667,10 +685,25 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
+        *
+        * XXX (TFO) - if we ever allow "data after SYN", the
+        * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;
 
+       /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
+       if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
+               tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
+       else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
+               tcp_rsk(req)->snt_synack = 0;
+
+       /* For Fast Open no more processing is needed (sk is the
+        * child socket).
+        */
+       if (fastopen)
+               return sk;
+
        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
@@ -678,10 +711,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }
-       if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
-               tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
-       else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
-               tcp_rsk(req)->snt_synack = 0;
 
        /* OK, ACK is valid, create big socket and
         * feed this segment to it. It will repeat all
@@ -706,11 +735,21 @@ listen_overflow:
        }
 
 embryonic_reset:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
-       if (!(flg & TCP_FLAG_RST))
+       if (!(flg & TCP_FLAG_RST)) {
+               /* Received a bad SYN pkt - for TFO we try not to reset
+                * the local connection unless it's really necessary to
+                * avoid becoming vulnerable to outside attack aiming at
+                * resetting legit local connections.
+                */
                req->rsk_ops->send_reset(sk, skb);
-
-       inet_csk_reqsk_queue_drop(sk, req, prev);
+       } else if (fastopen) { /* received a valid RST pkt */
+               reqsk_fastopen_remove(sk, req, true);
+               tcp_reset(sk);
+       }
+       if (!fastopen) {
+               inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+       }
        return NULL;
 }
 EXPORT_SYMBOL(tcp_check_req);
@@ -719,6 +758,12 @@ EXPORT_SYMBOL(tcp_check_req);
  * Queue segment on the new socket if the new socket is active,
  * otherwise we just shortcircuit this and continue with
  * the new socket.
+ *
+ * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
+ * when entering. But other states are possible due to a race condition
+ * where after __inet_lookup_established() fails but before the listener
+ * lock is obtained, other packets cause the same connection to
+ * be created.
  */
 
 int tcp_child_process(struct sock *parent, struct sock *child,
index d04632673a9e5f27725731e420d0997d91259aef..cfe6ffe1c1778b6517297ad3ac9b87d17ce14582 100644 (file)
@@ -702,7 +702,8 @@ static unsigned int tcp_synack_options(struct sock *sk,
                                   unsigned int mss, struct sk_buff *skb,
                                   struct tcp_out_options *opts,
                                   struct tcp_md5sig_key **md5,
-                                  struct tcp_extend_values *xvp)
+                                  struct tcp_extend_values *xvp,
+                                  struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -747,7 +748,15 @@ static unsigned int tcp_synack_options(struct sock *sk,
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-
+       if (foc != NULL) {
+               u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+               need = (need + 3) & ~3U;  /* Align to 32 bits */
+               if (remaining >= need) {
+                       opts->options |= OPTION_FAST_OPEN_COOKIE;
+                       opts->fastopen_cookie = foc;
+                       remaining -= need;
+               }
+       }
        /* Similar rationale to tcp_syn_options() applies here, too.
         * If the <SYN> options fit, the same options should fit now!
         */
@@ -2028,10 +2037,10 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (push_one)
                        break;
        }
-       if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
-               tp->prr_out += sent_pkts;
 
        if (likely(sent_pkts)) {
+               if (tcp_in_cwnd_reduction(sk))
+                       tp->prr_out += sent_pkts;
                tcp_cwnd_validate(sk);
                return false;
        }
@@ -2533,7 +2542,7 @@ begin_fwd:
                }
                NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
-               if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+               if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += tcp_skb_pcount(skb);
 
                if (skb == tcp_write_queue_head(sk))
@@ -2658,7 +2667,8 @@ int tcp_send_synack(struct sock *sk)
  */
 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
-                               struct request_values *rvp)
+                               struct request_values *rvp,
+                               struct tcp_fastopen_cookie *foc)
 {
        struct tcp_out_options opts;
        struct tcp_extend_values *xvp = tcp_xv(rvp);
@@ -2718,7 +2728,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 #endif
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        tcp_header_size = tcp_synack_options(sk, req, mss,
-                                            skb, &opts, &md5, xvp)
+                                            skb, &opts, &md5, xvp, foc)
                        + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
@@ -2772,7 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        }
 
        th->seq = htonl(TCP_SKB_CB(skb)->seq);
-       th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
+       /* XXX data is queued and acked as is. No buffer/window check */
+       th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
        /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
        th->window = htons(min(req->rcv_wnd, 65535U));
index b774a03bd1dcc1ccafa245a892ac0b312511a900..fc04711e80c89dd0dc92ff1027efaa7324b218c3 100644 (file)
@@ -304,6 +304,35 @@ static void tcp_probe_timer(struct sock *sk)
        }
 }
 
+/*
+ *     Timer for Fast Open socket to retransmit SYNACK. Note that the
+ *     sk here is the child socket, not the parent (listener) socket.
+ */
+static void tcp_fastopen_synack_timer(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       int max_retries = icsk->icsk_syn_retries ? :
+           sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+       struct request_sock *req;
+
+       req = tcp_sk(sk)->fastopen_rsk;
+       req->rsk_ops->syn_ack_timeout(sk, req);
+
+       if (req->retrans >= max_retries) {
+               tcp_write_err(sk);
+               return;
+       }
+       /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
+        * returned from rtx_syn_ack() to make it more persistent like
+        * regular retransmit because if the child socket has been accepted
+        * it's not good to give up too easily.
+        */
+       req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+       req->retrans++;
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                         TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX);
+}
+
 /*
  *     The TCP retransmit timer.
  */
@@ -317,7 +346,15 @@ void tcp_retransmit_timer(struct sock *sk)
                tcp_resume_early_retransmit(sk);
                return;
        }
-
+       if (tp->fastopen_rsk) {
+               BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+                   sk->sk_state != TCP_FIN_WAIT1);
+               tcp_fastopen_synack_timer(sk);
+               /* Before we receive ACK to our SYN-ACK don't retransmit
+                * anything else (e.g., data or FIN segments).
+                */
+               return;
+       }
        if (!tp->packets_out)
                goto out;
 
index d2f336ea82caa98cefdc6e84540fa784ea6703b7..505b30ad9182dc83e42e106b4bbaa507dd84f591 100644 (file)
@@ -26,7 +26,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
 
        return inet_sk_diag_fill(sk, NULL, skb, req,
                        sk_user_ns(NETLINK_CB(cb->skb).ssk),
-                       NETLINK_CB(cb->skb).pid,
+                       NETLINK_CB(cb->skb).portid,
                        cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
@@ -72,14 +72,14 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
 
        err = inet_sk_diag_fill(sk, NULL, rep, req,
                           sk_user_ns(NETLINK_CB(in_skb).ssk),
-                          NETLINK_CB(in_skb).pid,
+                          NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
index 5728695b54492dc2d275458b8db786f410e94002..4f7fe7270e3703226de121041d1e5e96a5b127df 100644 (file)
@@ -201,6 +201,22 @@ config IPV6_TUNNEL
 
          If unsure, say N.
 
+config IPV6_GRE
+       tristate "IPv6: GRE tunnel"
+       select IPV6_TUNNEL
+       ---help---
+         Tunneling means encapsulating data of one protocol type within
+         another protocol and sending it over a channel that understands the
+         encapsulating protocol. This particular tunneling driver implements
+         GRE (Generic Routing Encapsulation) and at this time allows
+         encapsulating of IPv4 or IPv6 over existing IPv6 infrastructure.
+         This driver is useful if the other endpoint is a Cisco router: Cisco
+         likes GRE much better than the other Linux tunneling driver ("IP
+         tunneling" above). In addition, GRE allows multicast redistribution
+         through the tunnel.
+
+         Saying M here will produce a module called ip6_gre. If unsure, say N.
+
 config IPV6_MULTIPLE_TABLES
        bool "IPv6: Multiple Routing Tables"
        depends on EXPERIMENTAL
index 686934acfac18eac215a17b8d4d01ea5ad03c860..b6d3f79151e28251f3b3b5a062869fa9acb64526 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_NETFILTER)       += netfilter/
 
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
 
 obj-y += addrconf_core.o exthdrs_core.o
 
index 6bc85f7c31e3c58a01a6d1aa351cd827584fad24..480e68422efb3c0f3ff7267ac89b8fe2b0d42fee 100644 (file)
@@ -127,8 +127,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
 #endif
 
 #ifdef CONFIG_IPV6_PRIVACY
-static int __ipv6_regen_rndid(struct inet6_dev *idev);
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
+static void __ipv6_regen_rndid(struct inet6_dev *idev);
+static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
 static void ipv6_regen_rndid(unsigned long data);
 #endif
 
@@ -788,10 +788,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
                struct in6_addr prefix;
                struct rt6_info *rt;
                struct net *net = dev_net(ifp->idev->dev);
+               struct flowi6 fl6 = {};
+
                ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
-               rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
+               fl6.flowi6_oif = ifp->idev->dev->ifindex;
+               fl6.daddr = prefix;
+               rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
+                                                        RT6_LOOKUP_F_IFACE);
 
-               if (rt && addrconf_is_prefix_route(rt)) {
+               if (rt != net->ipv6.ip6_null_entry &&
+                   addrconf_is_prefix_route(rt)) {
                        if (onlink == 0) {
                                ip6_del_rt(rt);
                                rt = NULL;
@@ -852,16 +858,7 @@ retry:
        }
        in6_ifa_hold(ifp);
        memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
-       if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
-               spin_unlock_bh(&ifp->lock);
-               write_unlock(&idev->lock);
-               pr_warn("%s: regeneration of randomized interface id failed\n",
-                       __func__);
-               in6_ifa_put(ifp);
-               in6_dev_put(idev);
-               ret = -1;
-               goto out;
-       }
+       __ipv6_try_regen_rndid(idev, tmpaddr);
        memcpy(&addr.s6_addr[8], idev->rndid, 8);
        age = (now - ifp->tstamp) / HZ;
        tmp_valid_lft = min_t(__u32,
@@ -1079,8 +1076,10 @@ static int ipv6_get_saddr_eval(struct net *net,
                break;
        case IPV6_SADDR_RULE_PREFIX:
                /* Rule 8: Use longest matching prefix */
-               score->matchlen = ret = ipv6_addr_diff(&score->ifa->addr,
-                                                      dst->addr);
+               ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
+               if (ret > score->ifa->prefix_len)
+                       ret = score->ifa->prefix_len;
+               score->matchlen = ret;
                break;
        default:
                ret = 0;
@@ -1093,7 +1092,7 @@ out:
        return ret;
 }
 
-int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
                       const struct in6_addr *daddr, unsigned int prefs,
                       struct in6_addr *saddr)
 {
@@ -1600,7 +1599,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
 
 #ifdef CONFIG_IPV6_PRIVACY
 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static int __ipv6_regen_rndid(struct inet6_dev *idev)
+static void __ipv6_regen_rndid(struct inet6_dev *idev)
 {
 regen:
        get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -1627,8 +1626,6 @@ regen:
                if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
                        goto regen;
        }
-
-       return 0;
 }
 
 static void ipv6_regen_rndid(unsigned long data)
@@ -1642,8 +1639,7 @@ static void ipv6_regen_rndid(unsigned long data)
        if (idev->dead)
                goto out;
 
-       if (__ipv6_regen_rndid(idev) < 0)
-               goto out;
+       __ipv6_regen_rndid(idev);
 
        expires = jiffies +
                idev->cnf.temp_prefered_lft * HZ -
@@ -1664,13 +1660,10 @@ out:
        in6_dev_put(idev);
 }
 
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void  __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
 {
-       int ret = 0;
-
        if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
-               ret = __ipv6_regen_rndid(idev);
-       return ret;
+               __ipv6_regen_rndid(idev);
 }
 #endif
 
@@ -1721,7 +1714,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        if (table == NULL)
                return NULL;
 
-       write_lock_bh(&table->tb6_lock);
+       read_lock_bh(&table->tb6_lock);
        fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
        if (!fn)
                goto out;
@@ -1736,7 +1729,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                break;
        }
 out:
-       write_unlock_bh(&table->tb6_lock);
+       read_unlock_bh(&table->tb6_lock);
        return rt;
 }
 
@@ -3549,12 +3542,12 @@ static inline int inet6_ifaddr_msgsize(void)
 }
 
 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
-                            u32 pid, u32 seq, int event, unsigned int flags)
+                            u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct nlmsghdr  *nlh;
        u32 preferred, valid;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -3592,7 +3585,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
 }
 
 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
-                               u32 pid, u32 seq, int event, u16 flags)
+                               u32 portid, u32 seq, int event, u16 flags)
 {
        struct nlmsghdr  *nlh;
        u8 scope = RT_SCOPE_UNIVERSE;
@@ -3601,7 +3594,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
        if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
                scope = RT_SCOPE_SITE;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -3617,7 +3610,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
 }
 
 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
-                               u32 pid, u32 seq, int event, unsigned int flags)
+                               u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct nlmsghdr  *nlh;
        u8 scope = RT_SCOPE_UNIVERSE;
@@ -3626,7 +3619,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
        if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
                scope = RT_SCOPE_SITE;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -3667,7 +3660,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                        if (++ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifaddr(skb, ifa,
-                                               NETLINK_CB(cb->skb).pid,
+                                               NETLINK_CB(cb->skb).portid,
                                                cb->nlh->nlmsg_seq,
                                                RTM_NEWADDR,
                                                NLM_F_MULTI);
@@ -3683,7 +3676,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                        if (ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifmcaddr(skb, ifmca,
-                                                 NETLINK_CB(cb->skb).pid,
+                                                 NETLINK_CB(cb->skb).portid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_GETMULTICAST,
                                                  NLM_F_MULTI);
@@ -3698,7 +3691,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                        if (ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifacaddr(skb, ifaca,
-                                                 NETLINK_CB(cb->skb).pid,
+                                                 NETLINK_CB(cb->skb).portid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_GETANYCAST,
                                                  NLM_F_MULTI);
@@ -3820,7 +3813,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout_ifa;
        }
 
-       err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
+       err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
                                nlh->nlmsg_seq, RTM_NEWADDR, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
@@ -3828,7 +3821,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                kfree_skb(skb);
                goto errout_ifa;
        }
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 errout_ifa:
        in6_ifa_put(ifa);
 errout:
@@ -4030,14 +4023,14 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
 }
 
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
-                            u32 pid, u32 seq, int event, unsigned int flags)
+                            u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct net_device *dev = idev->dev;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;
        void *protoinfo;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -4095,7 +4088,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        if (!idev)
                                goto cont;
                        if (inet6_fill_ifinfo(skb, idev,
-                                             NETLINK_CB(cb->skb).pid,
+                                             NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq,
                                              RTM_NEWLINK, NLM_F_MULTI) <= 0)
                                goto out;
@@ -4143,14 +4136,14 @@ static inline size_t inet6_prefix_nlmsg_size(void)
 }
 
 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
-                            struct prefix_info *pinfo, u32 pid, u32 seq,
+                            struct prefix_info *pinfo, u32 portid, u32 seq,
                             int event, unsigned int flags)
 {
        struct prefixmsg *pmsg;
        struct nlmsghdr *nlh;
        struct prefix_cacheinfo ci;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
index eb6a63632d3c1b6b2fab973588760053c4d9e3bb..4be23da32b89c14ef19d5b0b349c244e33b881cd 100644 (file)
@@ -57,7 +57,7 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
 }
 
 /*
- * Default policy table (RFC3484 + extensions)
+ * Default policy table (RFC6724 + extensions)
  *
  * prefix              addr_type       label
  * -------------------------------------------------------------------------
@@ -69,8 +69,12 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
  * fc00::/7            N/A             5               ULA (RFC 4193)
  * 2001::/32           N/A             6               Teredo (RFC 4380)
  * 2001:10::/28                N/A             7               ORCHID (RFC 4843)
+ * fec0::/10           N/A             11              Site-local
+ *                                                     (deprecated by RFC3879)
+ * 3ffe::/16           N/A             12              6bone
  *
  * Note: 0xffffffff is used if we do not have any policies.
+ * Note: Labels for ULA and 6to4 are different from labels listed in RFC6724.
  */
 
 #define IPV6_ADDR_LABEL_DEFAULT        0xffffffffUL
@@ -88,10 +92,18 @@ static const __net_initdata struct ip6addrlbl_init_table
                .prefix = &(struct in6_addr){{{ 0xfc }}},
                .prefixlen = 7,
                .label = 5,
+       },{     /* fec0::/10 */
+               .prefix = &(struct in6_addr){{{ 0xfe, 0xc0 }}},
+               .prefixlen = 10,
+               .label = 11,
        },{     /* 2002::/16 */
                .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}},
                .prefixlen = 16,
                .label = 2,
+       },{     /* 3ffe::/16 */
+               .prefix = &(struct in6_addr){{{ 0x3f, 0xfe }}},
+               .prefixlen = 16,
+               .label = 12,
        },{     /* 2001::/32 */
                .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
                .prefixlen = 32,
@@ -470,10 +482,10 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
 static int ip6addrlbl_fill(struct sk_buff *skb,
                           struct ip6addrlbl_entry *p,
                           u32 lseq,
-                          u32 pid, u32 seq, int event,
+                          u32 portid, u32 seq, int event,
                           unsigned int flags)
 {
-       struct nlmsghdr *nlh = nlmsg_put(skb, pid, seq, event,
+       struct nlmsghdr *nlh = nlmsg_put(skb, portid, seq, event,
                                         sizeof(struct ifaddrlblmsg), flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -503,7 +515,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
                    net_eq(ip6addrlbl_net(p), net)) {
                        if ((err = ip6addrlbl_fill(skb, p,
                                                   ip6addrlbl_table.seq,
-                                                  NETLINK_CB(cb->skb).pid,
+                                                  NETLINK_CB(cb->skb).portid,
                                                   cb->nlh->nlmsg_seq,
                                                   RTM_NEWADDRLABEL,
                                                   NLM_F_MULTI)) <= 0)
@@ -574,7 +586,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
        }
 
        err = ip6addrlbl_fill(skb, p, lseq,
-                             NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+                             NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                              RTM_NEWADDRLABEL, 0);
 
        ip6addrlbl_put(p);
@@ -585,7 +597,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
                goto out;
        }
 
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 out:
        return err;
 }
index 286acfc21250cd760565b182cbc2d3d4490463dd..24995a93ef8c94b22224dd344a9d474a72d28cc1 100644 (file)
@@ -514,7 +514,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
        ln = node_alloc();
 
        if (!ln)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        ln->fn_bit = plen;
 
        ln->parent = pn;
@@ -561,7 +561,7 @@ insert_above:
                                node_free(in);
                        if (ln)
                                node_free(ln);
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                }
 
                /*
@@ -611,7 +611,7 @@ insert_above:
                ln = node_alloc();
 
                if (!ln)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                ln->fn_bit = plen;
 
@@ -777,11 +777,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
        if (IS_ERR(fn)) {
                err = PTR_ERR(fn);
-               fn = NULL;
-       }
-
-       if (!fn)
                goto out;
+       }
 
        pn = fn;
 
@@ -820,15 +817,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                                        allow_create, replace_required);
 
                        if (IS_ERR(sn)) {
-                               err = PTR_ERR(sn);
-                               sn = NULL;
-                       }
-                       if (!sn) {
                                /* If it is failed, discard just allocated
                                   root, and then (in st_failure) stale node
                                   in main tree.
                                 */
                                node_free(sfn);
+                               err = PTR_ERR(sn);
                                goto st_failure;
                        }
 
@@ -843,10 +837,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
                        if (IS_ERR(sn)) {
                                err = PTR_ERR(sn);
-                               sn = NULL;
-                       }
-                       if (!sn)
                                goto st_failure;
+                       }
                }
 
                if (!fn->leaf) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
new file mode 100644 (file)
index 0000000..0185679
--- /dev/null
@@ -0,0 +1,1770 @@
+/*
+ *     GRE over IPv6 protocol decoder.
+ *
+ *     Authors: Dmitry Kozlov (xeb@mail.ru)
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/hash.h>
+#include <linux/if_tunnel.h>
+#include <linux/ip6_tunnel.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/protocol.h>
+#include <net/addrconf.h>
+#include <net/arp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
+
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#include <net/ip6_tunnel.h>
+
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static int ip6gre_net_id __read_mostly;
+struct ip6gre_net {
+       struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];
+
+       struct net_device *fb_tunnel_dev;
+};
+
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static int ip6gre_tunnel_init(struct net_device *dev);
+static void ip6gre_tunnel_setup(struct net_device *dev);
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+
+/* Tunnel hash table */
+
+/*
+   4 hash tables:
+
+   3: (remote,local)
+   2: (remote,*)
+   1: (*,local)
+   0: (*,*)
+
+   We require exact key match i.e. if a key is present in packet
+   it will match only tunnel with the same key; if it is not present,
+   it will match only keyless tunnel.
+
+   All keysless packets, if not matched configured keyless tunnels
+   will match fallback tunnel.
+ */
+
+#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
+static u32 HASH_ADDR(const struct in6_addr *addr)
+{
+       u32 hash = ipv6_addr_hash(addr);
+
+       return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+#define tunnels_r_l    tunnels[3]
+#define tunnels_r      tunnels[2]
+#define tunnels_l      tunnels[1]
+#define tunnels_wc     tunnels[0]
+/*
+ * Locking : hash tables are protected by RCU and RTNL
+ */
+
+#define for_each_ip_tunnel_rcu(start) \
+       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
+static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
+               struct rtnl_link_stats64 *tot)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+               u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       rx_packets = tstats->rx_packets;
+                       tx_packets = tstats->tx_packets;
+                       rx_bytes = tstats->rx_bytes;
+                       tx_bytes = tstats->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+               tot->rx_packets += rx_packets;
+               tot->tx_packets += tx_packets;
+               tot->rx_bytes   += rx_bytes;
+               tot->tx_bytes   += tx_bytes;
+       }
+
+       tot->multicast = dev->stats.multicast;
+       tot->rx_crc_errors = dev->stats.rx_crc_errors;
+       tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+       tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
+       tot->rx_errors = dev->stats.rx_errors;
+
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+       tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+       tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+       tot->tx_errors = dev->stats.tx_errors;
+
+       return tot;
+}
+
+/* Given src, dst and key, find appropriate for input tunnel. */
+
+static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
+               const struct in6_addr *remote, const struct in6_addr *local,
+               __be32 key, __be16 gre_proto)
+{
+       struct net *net = dev_net(dev);
+       int link = dev->ifindex;
+       unsigned int h0 = HASH_ADDR(remote);
+       unsigned int h1 = HASH_KEY(key);
+       struct ip6_tnl *t, *cand = NULL;
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+                      ARPHRD_ETHER : ARPHRD_IP6GRE;
+       int score, cand_score = 4;
+
+       for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
+               if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+                   !ipv6_addr_equal(remote, &t->parms.raddr) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
+               if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
+               if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
+                         (!ipv6_addr_equal(local, &t->parms.raddr) ||
+                                !ipv6_addr_is_multicast(local))) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
+               if (t->parms.i_key != key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       if (cand != NULL)
+               return cand;
+
+       dev = ign->fb_tunnel_dev;
+       if (dev->flags & IFF_UP)
+               return netdev_priv(dev);
+
+       return NULL;
+}
+
+static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
+               const struct __ip6_tnl_parm *p)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       unsigned int h = HASH_KEY(p->i_key);
+       int prio = 0;
+
+       if (!ipv6_addr_any(local))
+               prio |= 1;
+       if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
+               prio |= 2;
+               h ^= HASH_ADDR(remote);
+       }
+
+       return &ign->tunnels[prio][h];
+}
+
+static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
+               const struct ip6_tnl *t)
+{
+       return __ip6gre_bucket(ign, &t->parms);
+}
+
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+
+       rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+       rcu_assign_pointer(*tp, t);
+}
+
+static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *iter;
+
+       for (tp = ip6gre_bucket(ign, t);
+            (iter = rtnl_dereference(*tp)) != NULL;
+            tp = &iter->next) {
+               if (t == iter) {
+                       rcu_assign_pointer(*tp, t->next);
+                       break;
+               }
+       }
+}
+
+static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
+                                          const struct __ip6_tnl_parm *parms,
+                                          int type)
+{
+       const struct in6_addr *remote = &parms->raddr;
+       const struct in6_addr *local = &parms->laddr;
+       __be32 key = parms->i_key;
+       int link = parms->link;
+       struct ip6_tnl *t;
+       struct ip6_tnl __rcu **tp;
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       for (tp = __ip6gre_bucket(ign, parms);
+            (t = rtnl_dereference(*tp)) != NULL;
+            tp = &t->next)
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr) &&
+                   key == t->parms.i_key &&
+                   link == t->parms.link &&
+                   type == t->dev->type)
+                       break;
+
+       return t;
+}
+
+static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+               const struct __ip6_tnl_parm *parms, int create)
+{
+       struct ip6_tnl *t, *nt;
+       struct net_device *dev;
+       char name[IFNAMSIZ];
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
+       if (t || !create)
+               return t;
+
+       if (parms->name[0])
+               strlcpy(name, parms->name, IFNAMSIZ);
+       else
+               strcpy(name, "ip6gre%d");
+
+       dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
+       if (!dev)
+               return NULL;
+
+       dev_net_set(dev, net);
+
+       nt = netdev_priv(dev);
+       nt->parms = *parms;
+       dev->rtnl_link_ops = &ip6gre_link_ops;
+
+       nt->dev = dev;
+       ip6gre_tnl_link_config(nt, 1);
+
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
+
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+       return nt;
+
+failed_free:
+       free_netdev(dev);
+       return NULL;
+}
+
+static void ip6gre_tunnel_uninit(struct net_device *dev)
+{
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+       dev_put(dev);
+}
+
+
+static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+               u8 type, u8 code, int offset, __be32 info)
+{
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+       __be16 *p = (__be16 *)(skb->data + offset);
+       int grehlen = offset + 4;
+       struct ip6_tnl *t;
+       __be16 flags;
+
+       flags = p[0];
+       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+               if (flags&(GRE_VERSION|GRE_ROUTING))
+                       return;
+               if (flags&GRE_KEY) {
+                       grehlen += 4;
+                       if (flags&GRE_CSUM)
+                               grehlen += 4;
+               }
+       }
+
+       /* If only 8 bytes returned, keyed message will be dropped here */
+       if (!pskb_may_pull(skb, grehlen))
+               return;
+       ipv6h = (const struct ipv6hdr *)skb->data;
+       p = (__be16 *)(skb->data + offset);
+
+       t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+                               flags & GRE_KEY ?
+                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+                               p[1]);
+       if (t == NULL)
+               return;
+
+       switch (type) {
+               __u32 teli;
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               __u32 mtu;
+       case ICMPV6_DEST_UNREACH:
+               net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+                                    t->parms.name);
+               break;
+       case ICMPV6_TIME_EXCEED:
+               if (code == ICMPV6_EXC_HOPLIMIT) {
+                       net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+                                            t->parms.name);
+               }
+               break;
+       case ICMPV6_PARAMPROB:
+               teli = 0;
+               if (code == ICMPV6_HDR_FIELD)
+                       teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
+
+               if (teli && teli == info - 2) {
+                       tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
+                       if (tel->encap_limit == 0) {
+                               net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+                                                    t->parms.name);
+                       }
+               } else {
+                       net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+                                            t->parms.name);
+               }
+               break;
+       case ICMPV6_PKT_TOOBIG:
+               mtu = info - offset;
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+               t->dev->mtu = mtu;
+               break;
+       }
+
+       if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
+               t->err_count++;
+       else
+               t->err_count = 1;
+       t->err_time = jiffies;
+}
+
+static int ip6gre_rcv(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       u8     *h;
+       __be16    flags;
+       __sum16   csum = 0;
+       __be32 key = 0;
+       u32    seqno = 0;
+       struct ip6_tnl *tunnel;
+       int    offset = 4;
+       __be16 gre_proto;
+       int err;
+
+       if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
+               goto drop;
+
+       ipv6h = ipv6_hdr(skb);
+       h = skb->data;
+       flags = *(__be16 *)h;
+
+       if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
+               /* - Version must be 0.
+                  - We do not support routing headers.
+                */
+               if (flags&(GRE_VERSION|GRE_ROUTING))
+                       goto drop;
+
+               if (flags&GRE_CSUM) {
+                       switch (skb->ip_summed) {
+                       case CHECKSUM_COMPLETE:
+                               csum = csum_fold(skb->csum);
+                               if (!csum)
+                                       break;
+                               /* fall through */
+                       case CHECKSUM_NONE:
+                               skb->csum = 0;
+                               csum = __skb_checksum_complete(skb);
+                               skb->ip_summed = CHECKSUM_COMPLETE;
+                       }
+                       offset += 4;
+               }
+               if (flags&GRE_KEY) {
+                       key = *(__be32 *)(h + offset);
+                       offset += 4;
+               }
+               if (flags&GRE_SEQ) {
+                       seqno = ntohl(*(__be32 *)(h + offset));
+                       offset += 4;
+               }
+       }
+
+       gre_proto = *(__be16 *)(h + 2);
+
+       tunnel = ip6gre_tunnel_lookup(skb->dev,
+                                         &ipv6h->saddr, &ipv6h->daddr, key,
+                                         gre_proto);
+       if (tunnel) {
+               struct pcpu_tstats *tstats;
+
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+
+               if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
+                       tunnel->dev->stats.rx_dropped++;
+                       goto drop;
+               }
+
+               secpath_reset(skb);
+
+               skb->protocol = gre_proto;
+               /* WCCP version 1 and 2 protocol decoding.
+                * - Change protocol to IP
+                * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+                */
+               if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
+                       skb->protocol = htons(ETH_P_IP);
+                       if ((*(h + offset) & 0xF0) != 0x40)
+                               offset += 4;
+               }
+
+               skb->mac_header = skb->network_header;
+               __pskb_pull(skb, offset);
+               skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
+               skb->pkt_type = PACKET_HOST;
+
+               if (((flags&GRE_CSUM) && csum) ||
+                   (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
+                       tunnel->dev->stats.rx_crc_errors++;
+                       tunnel->dev->stats.rx_errors++;
+                       goto drop;
+               }
+               if (tunnel->parms.i_flags&GRE_SEQ) {
+                       if (!(flags&GRE_SEQ) ||
+                           (tunnel->i_seqno &&
+                                       (s32)(seqno - tunnel->i_seqno) < 0)) {
+                               tunnel->dev->stats.rx_fifo_errors++;
+                               tunnel->dev->stats.rx_errors++;
+                               goto drop;
+                       }
+                       tunnel->i_seqno = seqno + 1;
+               }
+
+               /* Warning: All skb pointers will be invalidated! */
+               if (tunnel->dev->type == ARPHRD_ETHER) {
+                       if (!pskb_may_pull(skb, ETH_HLEN)) {
+                               tunnel->dev->stats.rx_length_errors++;
+                               tunnel->dev->stats.rx_errors++;
+                               goto drop;
+                       }
+
+                       ipv6h = ipv6_hdr(skb);
+                       skb->protocol = eth_type_trans(skb, tunnel->dev);
+                       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+               }
+
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               skb_reset_network_header(skb);
+
+               err = IP6_ECN_decapsulate(ipv6h, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
+                                                    &ipv6h->saddr,
+                                                    ipv6_get_dsfield(ipv6h));
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto drop;
+                       }
+               }
+
+               tstats = this_cpu_ptr(tunnel->dev->tstats);
+               u64_stats_update_begin(&tstats->syncp);
+               tstats->rx_packets++;
+               tstats->rx_bytes += skb->len;
+               u64_stats_update_end(&tstats->syncp);
+
+               netif_rx(skb);
+
+               return 0;
+       }
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+struct ipv6_tel_txoption {
+       struct ipv6_txoptions ops;
+       __u8 dst_opt[8];
+};
+
+static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+{
+       memset(opt, 0, sizeof(struct ipv6_tel_txoption));
+
+       opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
+       opt->dst_opt[3] = 1;
+       opt->dst_opt[4] = encap_limit;
+       opt->dst_opt[5] = IPV6_TLV_PADN;
+       opt->dst_opt[6] = 1;
+
+       opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
+       opt->ops.opt_nflen = 8;
+}
+
+static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
+                        struct net_device *dev,
+                        __u8 dsfield,
+                        struct flowi6 *fl6,
+                        int encap_limit,
+                        __u32 *pmtu)
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct net_device *tdev;    /* Device to other host */
+       struct ipv6hdr  *ipv6h;     /* Our new IP header */
+       unsigned int max_headroom;  /* The extra header space needed */
+       int    gre_hlen;
+       struct ipv6_tel_txoption opt;
+       int    mtu;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct net_device_stats *stats = &tunnel->dev->stats;
+       int err = -1;
+       u8 proto;
+       int pkt_len;
+       struct sk_buff *new_skb;
+
+       if (dev->type == ARPHRD_ETHER)
+               IPCB(skb)->flags = 0;
+
+       if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
+               gre_hlen = 0;
+               ipv6h = (struct ipv6hdr *)skb->data;
+               fl6->daddr = ipv6h->daddr;
+       } else {
+               gre_hlen = tunnel->hlen;
+               fl6->daddr = tunnel->parms.raddr;
+       }
+
+       if (!fl6->flowi6_mark)
+               dst = ip6_tnl_dst_check(tunnel);
+
+       if (!dst) {
+               ndst = ip6_route_output(net, NULL, fl6);
+
+               if (ndst->error)
+                       goto tx_err_link_failure;
+               ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+               if (IS_ERR(ndst)) {
+                       err = PTR_ERR(ndst);
+                       ndst = NULL;
+                       goto tx_err_link_failure;
+               }
+               dst = ndst;
+       }
+
+       tdev = dst->dev;
+
+       if (tdev == dev) {
+               stats->collisions++;
+               net_warn_ratelimited("%s: Local routing loop detected!\n",
+                                    tunnel->parms.name);
+               goto tx_err_dst_release;
+       }
+
+       mtu = dst_mtu(dst) - sizeof(*ipv6h);
+       if (encap_limit >= 0) {
+               max_headroom += 8;
+               mtu -= 8;
+       }
+       if (mtu < IPV6_MIN_MTU)
+               mtu = IPV6_MIN_MTU;
+       if (skb_dst(skb))
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       if (skb->len > mtu) {
+               *pmtu = mtu;
+               err = -EMSGSIZE;
+               goto tx_err_dst_release;
+       }
+
+       if (tunnel->err_count > 0) {
+               if (time_before(jiffies,
+                               tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
+                       tunnel->err_count--;
+
+                       dst_link_failure(skb);
+               } else
+                       tunnel->err_count = 0;
+       }
+
+       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+
+       if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
+           (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+               new_skb = skb_realloc_headroom(skb, max_headroom);
+               if (max_headroom > dev->needed_headroom)
+                       dev->needed_headroom = max_headroom;
+               if (!new_skb)
+                       goto tx_err_dst_release;
+
+               if (skb->sk)
+                       skb_set_owner_w(new_skb, skb->sk);
+               consume_skb(skb);
+               skb = new_skb;
+       }
+
+       skb_dst_drop(skb);
+
+       if (fl6->flowi6_mark) {
+               skb_dst_set(skb, dst);
+               ndst = NULL;
+       } else {
+               skb_dst_set_noref(skb, dst);
+       }
+
+       skb->transport_header = skb->network_header;
+
+       proto = NEXTHDR_GRE;
+       if (encap_limit >= 0) {
+               init_tel_txopt(&opt, encap_limit);
+               ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
+       }
+
+       skb_push(skb, gre_hlen);
+       skb_reset_network_header(skb);
+
+       /*
+        *      Push down and install the IP header.
+        */
+       ipv6h = ipv6_hdr(skb);
+       *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
+       dsfield = INET_ECN_encapsulate(0, dsfield);
+       ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+       ipv6h->hop_limit = tunnel->parms.hop_limit;
+       ipv6h->nexthdr = proto;
+       ipv6h->saddr = fl6->saddr;
+       ipv6h->daddr = fl6->daddr;
+
+       ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
+       ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
+                                  htons(ETH_P_TEB) : skb->protocol;
+
+       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
+               __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
+
+               if (tunnel->parms.o_flags&GRE_SEQ) {
+                       ++tunnel->o_seqno;
+                       *ptr = htonl(tunnel->o_seqno);
+                       ptr--;
+               }
+               if (tunnel->parms.o_flags&GRE_KEY) {
+                       *ptr = tunnel->parms.o_key;
+                       ptr--;
+               }
+               if (tunnel->parms.o_flags&GRE_CSUM) {
+                       *ptr = 0;
+                       *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
+                               skb->len - sizeof(struct ipv6hdr));
+               }
+       }
+
+       nf_reset(skb);
+       pkt_len = skb->len;
+       err = ip6_local_out(skb);
+
+       if (net_xmit_eval(err) == 0) {
+               struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
+
+               tstats->tx_bytes += pkt_len;
+               tstats->tx_packets++;
+       } else {
+               stats->tx_errors++;
+               stats->tx_aborted_errors++;
+       }
+
+       if (ndst)
+               ip6_tnl_dst_store(tunnel, ndst);
+
+       return 0;
+tx_err_link_failure:
+       stats->tx_carrier_errors++;
+       dst_link_failure(skb);
+tx_err_dst_release:
+       dst_release(ndst);
+       return err;
+}
+
+/* Encapsulate an IPv4 packet for transmission over this GRE/IPv6 tunnel.
+ * Builds a flow from the tunnel's template, optionally inheriting the
+ * inner TOS (as IPv6 traffic class) and the skb mark, then hands off to
+ * ip6gre_xmit2().  On -EMSGSIZE an ICMP FRAG_NEEDED carrying the tunnel
+ * MTU is sent back to the inner sender.
+ *
+ * Returns 0 on success, -1 on error (caller drops the skb).
+ */
+static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       const struct iphdr  *iph = ip_hdr(skb);
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPIP;
+
+       dsfield = ipv4_get_dsfield(iph);
+
+       /* Map the inner IPv4 TOS byte into the IPv6 traffic-class bits of
+        * the flow label field.
+        */
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+                                         & IPV6_TCLASS_MASK;
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+               fl6.flowi6_mark = skb->mark;
+
+       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (err != 0) {
+               /* XXX: send ICMP error even if DF is not set. */
+               if (err == -EMSGSIZE)
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Encapsulate an IPv6 packet for transmission over this GRE/IPv6 tunnel.
+ * Rejects trivial loops (packet sourced from our own remote endpoint),
+ * honours a tunnel-encapsulation-limit option found in the inner packet,
+ * optionally inherits traffic class / flow label / skb mark, then hands
+ * off to ip6gre_xmit2().  On -EMSGSIZE an ICMPv6 Packet Too Big with the
+ * tunnel MTU is sent back to the inner sender.
+ *
+ * Returns 0 on success, -1 on error (caller drops the skb).
+ */
+static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       int encap_limit = -1;
+       __u16 offset;
+       struct flowi6 fl6;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+               return -1;
+
+       /* Inner packet carries a tunnel-encapsulation-limit option: a
+        * value of 0 means no further encapsulation is allowed, so report
+        * a parameter problem pointing at the option.  Otherwise propagate
+        * the decremented limit.
+        */
+       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+       if (offset > 0) {
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+               if (tel->encap_limit == 0) {
+                       icmpv6_send(skb, ICMPV6_PARAMPROB,
+                                   ICMPV6_HDR_FIELD, offset + 2);
+                       return -1;
+               }
+               encap_limit = tel->encap_limit - 1;
+       } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPV6;
+
+       dsfield = ipv6_get_dsfield(ipv6h);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+               fl6.flowi6_mark = skb->mark;
+
+       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (err != 0) {
+               if (err == -EMSGSIZE)
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   true if conflict,
+ *   false else
+ **/
+
+static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
+	const struct ipv6hdr *hdr)
+{
+       return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+/* Encapsulate a non-IP payload (e.g. when used as an Ethernet tap) over
+ * this GRE/IPv6 tunnel.  The inner ethertype is used as the flow protocol.
+ * NOTE(review): unlike the IPv4/IPv6 paths, no ICMP error is generated on
+ * -EMSGSIZE here — presumably intentional since there is no inner IP
+ * sender to notify; confirm.
+ */
+static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u32 mtu;
+       int err;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = skb->protocol;
+
+       err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+
+       return err;
+}
+
+/* ndo_start_xmit handler: dispatch on the inner protocol and encapsulate.
+ * An ndo_start_xmit implementation must always consume the skb and return
+ * a netdev_tx_t value.
+ */
+static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
+	struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       int ret;
+
+       /* Transmit not permitted: drop the skb and account the error.
+        * Returning -1 here (as before) both leaked the skb and returned
+        * an invalid netdev_tx_t to the stack.
+        */
+       if (!ip6_tnl_xmit_ctl(t))
+               goto tx_err;
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               ret = ip6gre_xmit_ipv4(skb, dev);
+               break;
+       case htons(ETH_P_IPV6):
+               ret = ip6gre_xmit_ipv6(skb, dev);
+               break;
+       default:
+               ret = ip6gre_xmit_other(skb, dev);
+               break;
+       }
+
+       if (ret < 0)
+               goto tx_err;
+
+       return NETDEV_TX_OK;
+
+tx_err:
+       stats->tx_errors++;
+       stats->tx_dropped++;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Recompute device state derived from the tunnel parameters: device
+ * addresses, the cached flowi6 template, point-to-point flag, the GRE
+ * header length (t->hlen) and, when @set_mtu is non-zero, the device MTU
+ * based on a route lookup towards the remote endpoint.
+ */
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+       struct net_device *dev = t->dev;
+       struct __ip6_tnl_parm *p = &t->parms;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
+       /* Base overhead: outer IPv6 header + 4-byte GRE base header. */
+       int addend = sizeof(struct ipv6hdr) + 4;
+
+       if (dev->type != ARPHRD_ETHER) {
+               memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+               memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+       }
+
+       /* Set up flowi template */
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
+       fl6->flowi6_oif = p->link;
+       fl6->flowlabel = 0;
+
+       if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
+               fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
+       if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
+               fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
+
+       p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+       p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+       if (p->flags&IP6_TNL_F_CAP_XMIT &&
+                       p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
+               dev->flags |= IFF_POINTOPOINT;
+       else
+               dev->flags &= ~IFF_POINTOPOINT;
+
+       dev->iflink = p->link;
+
+       /* Precalculate GRE options length */
+       if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
+               if (t->parms.o_flags&GRE_CSUM)
+                       addend += 4;
+               if (t->parms.o_flags&GRE_KEY)
+                       addend += 4;
+               if (t->parms.o_flags&GRE_SEQ)
+                       addend += 4;
+       }
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+               int strict = (ipv6_addr_type(&p->raddr) &
+                             (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
+
+               struct rt6_info *rt = rt6_lookup(dev_net(dev),
+                                                &p->raddr, &p->laddr,
+                                                p->link, strict);
+
+               if (rt == NULL)
+                       return;
+
+               if (rt->dst.dev) {
+                       dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
+
+                       if (set_mtu) {
+                               dev->mtu = rt->dst.dev->mtu - addend;
+                               /* Reserve 8 bytes for the destination-options
+                                * header carrying the encapsulation limit.
+                                */
+                               if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+                                       dev->mtu -= 8;
+
+                               if (dev->mtu < IPV6_MIN_MTU)
+                                       dev->mtu = IPV6_MIN_MTU;
+                       }
+               }
+               dst_release(&rt->dst);
+       }
+
+       t->hlen = addend;
+}
+
+/* Apply new parameters to an existing tunnel, reset the cached dst and
+ * recompute derived device state.  Always returns 0.
+ * NOTE(review): p->proto is copied here but ip6gre_tnl_parm_from_user()
+ * never fills it; callers pass stack-allocated parm structs — verify the
+ * field is initialised on every path.
+ */
+static int ip6gre_tnl_change(struct ip6_tnl *t,
+	const struct __ip6_tnl_parm *p, int set_mtu)
+{
+       t->parms.laddr = p->laddr;
+       t->parms.raddr = p->raddr;
+       t->parms.flags = p->flags;
+       t->parms.hop_limit = p->hop_limit;
+       t->parms.encap_limit = p->encap_limit;
+       t->parms.flowinfo = p->flowinfo;
+       t->parms.link = p->link;
+       t->parms.proto = p->proto;
+       t->parms.i_key = p->i_key;
+       t->parms.o_key = p->o_key;
+       t->parms.i_flags = p->i_flags;
+       t->parms.o_flags = p->o_flags;
+       ip6_tnl_dst_reset(t);
+       ip6gre_tnl_link_config(t, set_mtu);
+       return 0;
+}
+
+/* Convert the userspace ioctl parameter block (ip6_tnl_parm2) into the
+ * kernel-internal __ip6_tnl_parm representation.
+ * NOTE(review): u->proto is not copied; the destination struct is not
+ * zeroed here either, so p->proto retains whatever the caller left there.
+ */
+static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
+	const struct ip6_tnl_parm2 *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->flags = u->flags;
+       p->hop_limit = u->hop_limit;
+       p->encap_limit = u->encap_limit;
+       p->flowinfo = u->flowinfo;
+       p->link = u->link;
+       p->i_key = u->i_key;
+       p->o_key = u->o_key;
+       p->i_flags = u->i_flags;
+       p->o_flags = u->o_flags;
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+/* Convert the kernel-internal tunnel parameters back into the userspace
+ * ioctl layout.  The protocol is always reported as IPPROTO_GRE.
+ */
+static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
+	const struct __ip6_tnl_parm *p)
+{
+       u->proto = IPPROTO_GRE;
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->flags = p->flags;
+       u->hop_limit = p->hop_limit;
+       u->encap_limit = p->encap_limit;
+       u->flowinfo = p->flowinfo;
+       u->link = p->link;
+       u->i_key = p->i_key;
+       u->o_key = p->o_key;
+       u->i_flags = p->i_flags;
+       u->o_flags = p->o_flags;
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
+/* Legacy ioctl interface (SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/
+ * SIOCDELTUNNEL) for ip6gre tunnels, mirroring the ipv4 ip_gre driver.
+ * NOTE(review): permission checks use capable(CAP_NET_ADMIN); with the
+ * concurrent user-namespace work this may need to become a netns-aware
+ * check — confirm against the final merge.
+ */
+static int ip6gre_tunnel_ioctl(struct net_device *dev,
+	struct ifreq *ifr, int cmd)
+{
+       int err = 0;
+       struct ip6_tnl_parm2 p;
+       struct __ip6_tnl_parm p1;
+       struct ip6_tnl *t;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       switch (cmd) {
+       case SIOCGETTUNNEL:
+               t = NULL;
+               /* On the fallback device, look up the tunnel described by
+                * the user-supplied parameters; otherwise report this dev.
+                */
+               if (dev == ign->fb_tunnel_dev) {
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       ip6gre_tnl_parm_from_user(&p1, &p);
+                       t = ip6gre_tunnel_locate(net, &p1, 0);
+               }
+               if (t == NULL)
+                       t = netdev_priv(dev);
+               ip6gre_tnl_parm_to_user(&p, &t->parms);
+               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                       err = -EFAULT;
+               break;
+
+       case SIOCADDTUNNEL:
+       case SIOCCHGTUNNEL:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       goto done;
+
+               err = -EFAULT;
+               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                       goto done;
+
+               /* GRE version/routing bits are not supported. */
+               err = -EINVAL;
+               if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
+                       goto done;
+
+               /* Keys are only meaningful when the KEY flag is set. */
+               if (!(p.i_flags&GRE_KEY))
+                       p.i_key = 0;
+               if (!(p.o_flags&GRE_KEY))
+                       p.o_key = 0;
+
+               ip6gre_tnl_parm_from_user(&p1, &p);
+               t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
+
+               if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+                       if (t != NULL) {
+                               if (t->dev != dev) {
+                                       err = -EEXIST;
+                                       break;
+                               }
+                       } else {
+                               t = netdev_priv(dev);
+
+                               /* Unlink, wait for readers, change, relink. */
+                               ip6gre_tunnel_unlink(ign, t);
+                               synchronize_net();
+                               ip6gre_tnl_change(t, &p1, 1);
+                               ip6gre_tunnel_link(ign, t);
+                               netdev_state_change(dev);
+                       }
+               }
+
+               if (t) {
+                       err = 0;
+
+                       ip6gre_tnl_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                               err = -EFAULT;
+               } else
+                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               break;
+
+       case SIOCDELTUNNEL:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       goto done;
+
+               if (dev == ign->fb_tunnel_dev) {
+                       err = -EFAULT;
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                               goto done;
+                       err = -ENOENT;
+                       ip6gre_tnl_parm_from_user(&p1, &p);
+                       t = ip6gre_tunnel_locate(net, &p1, 0);
+                       if (t == NULL)
+                               goto done;
+                       /* The fallback tunnel itself cannot be deleted. */
+                       err = -EPERM;
+                       if (t == netdev_priv(ign->fb_tunnel_dev))
+                               goto done;
+                       dev = t->dev;
+               }
+               unregister_netdevice(dev);
+               err = 0;
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+done:
+       return err;
+}
+
+/* ndo_change_mtu: accept an MTU between 68 (the historical minimum) and
+ * the maximum IPv6 payload (0xFFF8) minus link header and GRE overhead.
+ */
+static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       if (new_mtu < 68 ||
+           new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+/* header_ops->create: pre-build the outer IPv6 + GRE header for a
+ * NOARP tunnel.  Returns the (positive) header length when a destination
+ * is known, or the negative header length when the daddr is still
+ * unspecified (header must be completed later).
+ */
+static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
+                       unsigned short type,
+                       const void *daddr, const void *saddr, unsigned int len)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
+       __be16 *p = (__be16 *)(ipv6h+1);
+
+       /* Version 6 plus the tunnel's flow label bits. */
+       *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
+       ipv6h->hop_limit = t->parms.hop_limit;
+       ipv6h->nexthdr = NEXTHDR_GRE;
+       ipv6h->saddr = t->parms.laddr;
+       ipv6h->daddr = t->parms.raddr;
+
+       p[0]            = t->parms.o_flags;
+       p[1]            = htons(type);
+
+       /*
+        *      Set the source hardware address.
+        */
+
+       if (saddr)
+               memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
+       if (daddr)
+               memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
+       if (!ipv6_addr_any(&ipv6h->daddr))
+               return t->hlen;
+
+       return -t->hlen;
+}
+
+/* Used only for tunnels with an unspecified remote (see tunnel_init). */
+static const struct header_ops ip6gre_header_ops = {
+       .create = ip6gre_header,
+};
+
+/* Device operations for plain (layer-3, ARPHRD_IP6GRE) tunnels. */
+static const struct net_device_ops ip6gre_netdev_ops = {
+       .ndo_init               = ip6gre_tunnel_init,
+       .ndo_uninit             = ip6gre_tunnel_uninit,
+       .ndo_start_xmit         = ip6gre_tunnel_xmit,
+       .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
+       .ndo_change_mtu         = ip6gre_tunnel_change_mtu,
+       .ndo_get_stats64        = ip6gre_get_stats64,
+};
+
+/* Device destructor: release per-cpu stats, then the netdev itself. */
+static void ip6gre_dev_free(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+       free_netdev(dev);
+}
+
+/* Initialise a layer-3 ip6gre net_device: ops, type, and an initial
+ * header length / MTU covering the outer IPv6 header plus the 4-byte GRE
+ * base header (refined later by ip6gre_tnl_link_config()).
+ */
+static void ip6gre_tunnel_setup(struct net_device *dev)
+{
+       struct ip6_tnl *t;
+
+       dev->netdev_ops = &ip6gre_netdev_ops;
+       dev->destructor = ip6gre_dev_free;
+
+       dev->type = ARPHRD_IP6GRE;
+       dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
+       dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
+       t = netdev_priv(dev);
+       /* Leave room for the 8-byte encapsulation-limit option. */
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               dev->mtu -= 8;
+       dev->flags |= IFF_NOARP;
+       dev->iflink = 0;
+       dev->addr_len = sizeof(struct in6_addr);
+       dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/* ndo_init for layer-3 tunnels: wire up the private area, copy the
+ * tunnel endpoints into the device addresses and allocate per-cpu stats.
+ * Tunnels without a fixed remote get header_ops so the daddr can be
+ * supplied per packet.
+ */
+static int ip6gre_tunnel_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel;
+
+       tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
+       memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
+
+       if (ipv6_addr_any(&tunnel->parms.raddr))
+               dev->header_ops = &ip6gre_header_ops;
+
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Initialise the per-netns fallback device "ip6gre0".  Its header length
+ * is the optionless minimum (IPv6 header + 4-byte GRE base).  The extra
+ * dev_hold() pins the device for the lifetime of the namespace.
+ */
+static void ip6gre_fb_tunnel_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
+
+       dev_hold(dev);
+}
+
+
+/* IPv6 protocol handler for IPPROTO_GRE. */
+static struct inet6_protocol ip6gre_protocol __read_mostly = {
+       .handler     = ip6gre_rcv,
+       .err_handler = ip6gre_err,
+       .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+};
+
+/* Queue every tunnel in the namespace for unregistration.  The hash table
+ * has 4 priority classes (keyed/addressed variants) of HASH_SIZE buckets,
+ * each bucket holding an RCU-protected singly-linked chain.
+ */
+static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
+	struct list_head *head)
+{
+       int prio;
+
+       for (prio = 0; prio < 4; prio++) {
+               int h;
+               for (h = 0; h < HASH_SIZE; h++) {
+                       struct ip6_tnl *t;
+
+                       t = rtnl_dereference(ign->tunnels[prio][h]);
+
+                       while (t != NULL) {
+                               unregister_netdevice_queue(t->dev, head);
+                               t = rtnl_dereference(t->next);
+                       }
+               }
+       }
+}
+
+/* Per-netns init: create and register the fallback "ip6gre0" device and
+ * hang it in the wildcard hash chain.  On registration failure the device
+ * is freed via ip6gre_dev_free() since the destructor has not run yet.
+ */
+static int __net_init ip6gre_init_net(struct net *net)
+{
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int err;
+
+       ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+                                          ip6gre_tunnel_setup);
+       if (!ign->fb_tunnel_dev) {
+               err = -ENOMEM;
+               goto err_alloc_dev;
+       }
+       dev_net_set(ign->fb_tunnel_dev, net);
+
+       ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
+       ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
+
+       err = register_netdev(ign->fb_tunnel_dev);
+       if (err)
+               goto err_reg_dev;
+
+       rcu_assign_pointer(ign->tunnels_wc[0],
+                          netdev_priv(ign->fb_tunnel_dev));
+       return 0;
+
+err_reg_dev:
+       ip6gre_dev_free(ign->fb_tunnel_dev);
+err_alloc_dev:
+       return err;
+}
+
+/* Per-netns teardown: batch-unregister every tunnel device under RTNL. */
+static void __net_exit ip6gre_exit_net(struct net *net)
+{
+       struct ip6gre_net *ign;
+       LIST_HEAD(list);
+
+       ign = net_generic(net, ip6gre_net_id);
+       rtnl_lock();
+       ip6gre_destroy_tunnels(ign, &list);
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
+/* Per-network-namespace state registration for the ip6gre module. */
+static struct pernet_operations ip6gre_net_ops = {
+       .init = ip6gre_init_net,
+       .exit = ip6gre_exit_net,
+       .id   = &ip6gre_net_id,
+       .size = sizeof(struct ip6gre_net),
+};
+
+/* Netlink validation for "ip6gre" links: the unsupported GRE version and
+ * routing flag bits must not be requested in either direction.
+ */
+static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       __be16 flags;
+
+       if (!data)
+               return 0;
+
+       flags = 0;
+       if (data[IFLA_GRE_IFLAGS])
+               flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+       if (data[IFLA_GRE_OFLAGS])
+               flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+       if (flags & (GRE_VERSION|GRE_ROUTING))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Netlink validation for "ip6gretap" links: additionally require a valid
+ * Ethernet MAC (if given) and a non-unspecified remote address, then
+ * apply the common ip6gre checks.
+ */
+static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       struct in6_addr daddr;
+
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+
+       if (!data)
+               goto out;
+
+       if (data[IFLA_GRE_REMOTE]) {
+               nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+               if (ipv6_addr_any(&daddr))
+                       return -EINVAL;
+       }
+
+out:
+       return ip6gre_tunnel_validate(tb, data);
+}
+
+
+/* Translate IFLA_GRE_* netlink attributes into tunnel parameters.
+ * Unspecified attributes leave the zero-initialised defaults in place.
+ */
+static void ip6gre_netlink_parms(struct nlattr *data[],
+				struct __ip6_tnl_parm *parms)
+{
+       memset(parms, 0, sizeof(*parms));
+
+       if (!data)
+               return;
+
+       if (data[IFLA_GRE_LINK])
+               parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
+
+       if (data[IFLA_GRE_IFLAGS])
+               parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
+
+       if (data[IFLA_GRE_OFLAGS])
+               parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
+
+       if (data[IFLA_GRE_IKEY])
+               parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
+
+       if (data[IFLA_GRE_OKEY])
+               parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
+
+       if (data[IFLA_GRE_LOCAL])
+               nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
+
+       if (data[IFLA_GRE_REMOTE])
+               nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+
+       if (data[IFLA_GRE_TTL])
+               parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
+
+       if (data[IFLA_GRE_ENCAP_LIMIT])
+               parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
+
+       if (data[IFLA_GRE_FLOWINFO])
+               parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
+
+       if (data[IFLA_GRE_FLAGS])
+               parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
+}
+
+/* ndo_init for ip6gretap (Ethernet) devices: wire up the private area,
+ * derive device state from the parameters and allocate per-cpu stats.
+ */
+static int ip6gre_tap_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel;
+
+       tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       ip6gre_tnl_link_config(tunnel, 1);
+
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Device operations for Ethernet-over-GRE (ip6gretap) devices. */
+static const struct net_device_ops ip6gre_tap_netdev_ops = {
+       .ndo_init = ip6gre_tap_init,
+       .ndo_uninit = ip6gre_tunnel_uninit,
+       .ndo_start_xmit = ip6gre_tunnel_xmit,
+       .ndo_set_mac_address = eth_mac_addr,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_change_mtu = ip6gre_tunnel_change_mtu,
+       .ndo_get_stats64 = ip6gre_get_stats64,
+};
+
+/* Setup for ip6gretap devices: standard Ethernet defaults, then the
+ * tap-specific ops and netns-local restriction.
+ */
+static void ip6gre_tap_setup(struct net_device *dev)
+{
+
+       ether_setup(dev);
+
+       dev->netdev_ops = &ip6gre_tap_netdev_ops;
+       dev->destructor = ip6gre_dev_free;
+
+       dev->iflink = 0;
+       dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+/* rtnl_link_ops->newlink: create a tunnel from netlink attributes.
+ * Fails with -EEXIST if a tunnel with identical parameters already
+ * exists.  The extra dev_hold() matches the driver's unlink/teardown
+ * convention (see ip6gre_fb_tunnel_init).
+ */
+static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+	struct nlattr *tb[], struct nlattr *data[])
+{
+       struct ip6_tnl *nt;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int err;
+
+       nt = netdev_priv(dev);
+       ip6gre_netlink_parms(data, &nt->parms);
+
+       if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+               return -EEXIST;
+
+       if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
+               eth_hw_addr_random(dev);
+
+       nt->dev = dev;
+       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
+       err = register_netdevice(dev);
+       if (err)
+               goto out;
+
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+
+out:
+       return err;
+}
+
+/* rtnl_link_ops->changelink: update an existing tunnel's parameters.
+ * The fallback device cannot be reconfigured.
+ * NOTE(review): unlike the SIOCCHGTUNNEL ioctl path, there is no
+ * synchronize_net() between unlink and relink here — confirm whether
+ * that is safe or an oversight.
+ */
+static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[])
+{
+       struct ip6_tnl *t, *nt;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct __ip6_tnl_parm p;
+
+       if (dev == ign->fb_tunnel_dev)
+               return -EINVAL;
+
+       nt = netdev_priv(dev);
+       ip6gre_netlink_parms(data, &p);
+
+       t = ip6gre_tunnel_locate(net, &p, 0);
+
+       if (t) {
+               if (t->dev != dev)
+                       return -EEXIST;
+       } else {
+               t = nt;
+
+               ip6gre_tunnel_unlink(ign, t);
+               ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+               ip6gre_tunnel_link(ign, t);
+               netdev_state_change(dev);
+       }
+
+       return 0;
+}
+
+/* Worst-case netlink attribute payload for ip6gre_fill_info().  The
+ * LOCAL/REMOTE attributes carry full IPv6 addresses (16 bytes), not the
+ * 4 bytes the IPv4 GRE driver reserves.
+ */
+static size_t ip6gre_get_size(const struct net_device *dev)
+{
+       return
+               /* IFLA_GRE_LINK */
+               nla_total_size(4) +
+               /* IFLA_GRE_IFLAGS */
+               nla_total_size(2) +
+               /* IFLA_GRE_OFLAGS */
+               nla_total_size(2) +
+               /* IFLA_GRE_IKEY */
+               nla_total_size(4) +
+               /* IFLA_GRE_OKEY */
+               nla_total_size(4) +
+               /* IFLA_GRE_LOCAL */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_GRE_REMOTE */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_GRE_TTL */
+               nla_total_size(1) +
+               /* IFLA_GRE_TOS */
+               nla_total_size(1) +
+               /* IFLA_GRE_ENCAP_LIMIT */
+               nla_total_size(1) +
+               /* IFLA_GRE_FLOWINFO */
+               nla_total_size(4) +
+               /* IFLA_GRE_FLAGS */
+               nla_total_size(4) +
+               0;
+}
+
+/* Dump the tunnel parameters as IFLA_GRE_* attributes; the skb must have
+ * been sized by ip6gre_get_size().
+ */
+static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct __ip6_tnl_parm *p = &t->parms;
+
+       /* IFLA_GRE_LOCAL is our local address (laddr) and IFLA_GRE_REMOTE
+        * the peer address (raddr); the two were previously swapped.
+        */
+       if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+           nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+           nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+           nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+           nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+           nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
+           nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
+           /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
+           nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
+           nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
+           nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* Netlink attribute policy shared by the ip6gre and ip6gretap link ops. */
+static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+       [IFLA_GRE_LINK]        = { .type = NLA_U32 },
+       [IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
+       [IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
+       [IFLA_GRE_IKEY]        = { .type = NLA_U32 },
+       [IFLA_GRE_OKEY]        = { .type = NLA_U32 },
+       [IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
+       [IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
+       [IFLA_GRE_TTL]         = { .type = NLA_U8 },
+       [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
+       [IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
+       [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
+};
+
+/* rtnetlink operations for layer-3 "ip6gre" links. */
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+       .kind           = "ip6gre",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = ip6gre_tunnel_setup,
+       .validate       = ip6gre_tunnel_validate,
+       .newlink        = ip6gre_newlink,
+       .changelink     = ip6gre_changelink,
+       .get_size       = ip6gre_get_size,
+       .fill_info      = ip6gre_fill_info,
+};
+
+/* rtnetlink operations for Ethernet "ip6gretap" links. */
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+       .kind           = "ip6gretap",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = ip6gre_tap_setup,
+       .validate       = ip6gre_tap_validate,
+       .newlink        = ip6gre_newlink,
+       .changelink     = ip6gre_changelink,
+       .get_size       = ip6gre_get_size,
+       .fill_info      = ip6gre_fill_info,
+};
+
+/*
+ *     And now the modules code and kernel interface.
+ */
+
+/* Module init: register in order — pernet state, the IPPROTO_GRE
+ * handler, then both rtnetlink link types — unwinding in reverse on any
+ * failure.
+ */
+static int __init ip6gre_init(void)
+{
+       int err;
+
+       pr_info("GRE over IPv6 tunneling driver\n");
+
+       err = register_pernet_device(&ip6gre_net_ops);
+       if (err < 0)
+               return err;
+
+       err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
+       if (err < 0) {
+               pr_info("%s: can't add protocol\n", __func__);
+               goto add_proto_failed;
+       }
+
+       err = rtnl_link_register(&ip6gre_link_ops);
+       if (err < 0)
+               goto rtnl_link_failed;
+
+       err = rtnl_link_register(&ip6gre_tap_ops);
+       if (err < 0)
+               goto tap_ops_failed;
+
+out:
+       return err;
+
+tap_ops_failed:
+       rtnl_link_unregister(&ip6gre_link_ops);
+rtnl_link_failed:
+       inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+add_proto_failed:
+       unregister_pernet_device(&ip6gre_net_ops);
+       goto out;
+}
+
+/* Module exit: tear everything down in the reverse order of ip6gre_init(). */
+static void __exit ip6gre_fini(void)
+{
+       rtnl_link_unregister(&ip6gre_tap_ops);
+       rtnl_link_unregister(&ip6gre_link_ops);
+       inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+       unregister_pernet_device(&ip6gre_net_ops);
+}
+
+module_init(ip6gre_init);
+module_exit(ip6gre_fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
+MODULE_ALIAS_RTNL_LINK("ip6gre");
+MODULE_ALIAS_NETDEV("ip6gre0");
index 5b2d63ed793e1ffe7a568e9c581ccf7bab9057a5..aece3e792f84ad4cfcc88ada01817c70242b396d 100644 (file)
@@ -123,16 +123,11 @@ static int ip6_finish_output2(struct sk_buff *skb)
                                skb->len);
        }
 
-       rcu_read_lock();
        rt = (struct rt6_info *) dst;
        neigh = rt->n;
-       if (neigh) {
-               int res = dst_neigh_output(dst, neigh, skb);
+       if (neigh)
+               return dst_neigh_output(dst, neigh, skb);
 
-               rcu_read_unlock();
-               return res;
-       }
-       rcu_read_unlock();
        IP6_INC_STATS_BH(dev_net(dst->dev),
                         ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
@@ -493,7 +488,8 @@ int ip6_forward(struct sk_buff *skb)
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
 
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
+           (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -636,7 +632,9 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb it not generated by a local socket.
         */
-       if (unlikely(!skb->local_df && skb->len > mtu)) {
+       if (unlikely(!skb->local_df && skb->len > mtu) ||
+                    (IP6CB(skb)->frag_max_size &&
+                     IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
                        sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 
@@ -980,7 +978,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         * dst entry and replace it instead with the
         * dst entry of the nexthop router
         */
-       rcu_read_lock();
        rt = (struct rt6_info *) *dst;
        n = rt->n;
        if (n && !(n->nud_state & NUD_VALID)) {
@@ -988,7 +985,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                struct flowi6 fl_gw6;
                int redirect;
 
-               rcu_read_unlock();
                ifp = ipv6_get_ifaddr(net, &fl6->saddr,
                                      (*dst)->dev, 1);
 
@@ -1008,8 +1004,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
-       } else {
-               rcu_read_unlock();
        }
 #endif
 
@@ -1285,8 +1279,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                if (dst_allfrag(rt->dst.path))
                        cork->flags |= IPCORK_ALLFRAG;
                cork->length = 0;
-               sk->sk_sndmsg_page = NULL;
-               sk->sk_sndmsg_off = 0;
                exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
                length += exthdrlen;
                transhdrlen += exthdrlen;
@@ -1510,48 +1502,31 @@ alloc_new_skb:
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
-                       struct page *page = sk->sk_sndmsg_page;
-                       int off = sk->sk_sndmsg_off;
-                       unsigned int left;
-
-                       if (page && (left = PAGE_SIZE - off) > 0) {
-                               if (copy >= left)
-                                       copy = left;
-                               if (page != skb_frag_page(frag)) {
-                                       if (i == MAX_SKB_FRAGS) {
-                                               err = -EMSGSIZE;
-                                               goto error;
-                                       }
-                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
-                                       skb_frag_ref(skb, i);
-                                       frag = &skb_shinfo(skb)->frags[i];
-                               }
-                       } else if(i < MAX_SKB_FRAGS) {
-                               if (copy > PAGE_SIZE)
-                                       copy = PAGE_SIZE;
-                               page = alloc_pages(sk->sk_allocation, 0);
-                               if (page == NULL) {
-                                       err = -ENOMEM;
-                                       goto error;
-                               }
-                               sk->sk_sndmsg_page = page;
-                               sk->sk_sndmsg_off = 0;
+                       struct page_frag *pfrag = sk_page_frag(sk);
 
-                               skb_fill_page_desc(skb, i, page, 0, 0);
-                               frag = &skb_shinfo(skb)->frags[i];
-                       } else {
-                               err = -EMSGSIZE;
+                       err = -ENOMEM;
+                       if (!sk_page_frag_refill(sk, pfrag))
                                goto error;
+
+                       if (!skb_can_coalesce(skb, i, pfrag->page,
+                                             pfrag->offset)) {
+                               err = -EMSGSIZE;
+                               if (i == MAX_SKB_FRAGS)
+                                       goto error;
+
+                               __skb_fill_page_desc(skb, i, pfrag->page,
+                                                    pfrag->offset, 0);
+                               skb_shinfo(skb)->nr_frags = ++i;
+                               get_page(pfrag->page);
                        }
+                       copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
-                                   skb_frag_address(frag) + skb_frag_size(frag),
-                                   offset, copy, skb->len, skb) < 0) {
-                               err = -EFAULT;
-                               goto error;
-                       }
-                       sk->sk_sndmsg_off += copy;
-                       skb_frag_size_add(frag, copy);
+                                   page_address(pfrag->page) + pfrag->offset,
+                                   offset, copy, skb->len, skb) < 0)
+                               goto error_efault;
+
+                       pfrag->offset += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
@@ -1560,7 +1535,11 @@ alloc_new_skb:
                offset += copy;
                length -= copy;
        }
+
        return 0;
+
+error_efault:
+       err = -EFAULT;
 error:
        cork->length -= length;
        IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
index 9a1d5fe6aef8f229e22c8b2e8f3cef1663be09ce..cb7e2ded6f08cce17f8fb11a7e7e119e8564d661 100644 (file)
@@ -126,7 +126,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
        struct dst_entry *dst = t->dst_cache;
 
@@ -139,20 +139,23 @@ static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 
        return dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
 
-static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
+void ip6_tnl_dst_reset(struct ip6_tnl *t)
 {
        dst_release(t->dst_cache);
        t->dst_cache = NULL;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
-static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 {
        struct rt6_info *rt = (struct rt6_info *) dst;
        t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
        dst_release(t->dst_cache);
        t->dst_cache = dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -200,7 +203,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
  **/
 
 static struct ip6_tnl __rcu **
-ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
+ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
 {
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
@@ -267,7 +270,7 @@ static void ip6_dev_free(struct net_device *dev)
  *   created tunnel or NULL
  **/
 
-static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
+static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 {
        struct net_device *dev;
        struct ip6_tnl *t;
@@ -322,7 +325,7 @@ failed:
  **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
-               struct ip6_tnl_parm *p, int create)
+               struct __ip6_tnl_parm *p, int create)
 {
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
@@ -374,8 +377,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
  *   else index to encapsulation limit
  **/
 
-static __u16
-parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
        const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
        __u8 nexthdr = ipv6h->nexthdr;
@@ -425,6 +427,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
        }
        return 0;
 }
+EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
 
 /**
  * ip6_tnl_err - tunnel error handler
@@ -480,7 +483,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
        case ICMPV6_PARAMPROB:
                teli = 0;
                if ((*code) == ICMPV6_HDR_FIELD)
-                       teli = parse_tlv_tnl_enc_lim(skb, skb->data);
+                       teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
                if (teli && teli == *info - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -693,11 +696,11 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
                IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
-static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
                             const struct in6_addr *laddr,
                             const struct in6_addr *raddr)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ltype = ipv6_addr_type(laddr);
        int rtype = ipv6_addr_type(raddr);
        __u32 flags = 0;
@@ -715,13 +718,14 @@ static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
        }
        return flags;
 }
+EXPORT_SYMBOL(ip6_tnl_get_cap);
 
 /* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
                                  const struct in6_addr *laddr,
                                  const struct in6_addr *raddr)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
        struct net *net = dev_net(t->dev);
 
@@ -740,6 +744,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
 /**
  * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -859,9 +864,9 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
-static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
        struct net *net = dev_net(t->dev);
 
@@ -885,6 +890,8 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
+
 /**
  * ip6_tnl_xmit2 - encapsulate packet and send
  *   @skb: the outgoing socket buffer
@@ -1085,7 +1092,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
            !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
 
-       offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
+       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -1152,7 +1159,7 @@ tx_err:
 static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1215,7 +1222,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
  **/
 
 static int
-ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
+ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 {
        t->parms.laddr = p->laddr;
        t->parms.raddr = p->raddr;
@@ -1230,6 +1237,34 @@ ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
        return 0;
 }
 
+static void
+ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->flags = u->flags;
+       p->hop_limit = u->hop_limit;
+       p->encap_limit = u->encap_limit;
+       p->flowinfo = u->flowinfo;
+       p->link = u->link;
+       p->proto = u->proto;
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
+{
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->flags = p->flags;
+       u->hop_limit = p->hop_limit;
+       u->encap_limit = p->encap_limit;
+       u->flowinfo = p->flowinfo;
+       u->link = p->link;
+       u->proto = p->proto;
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
 /**
  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
  *   @dev: virtual device associated with tunnel
@@ -1263,6 +1298,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        int err = 0;
        struct ip6_tnl_parm p;
+       struct __ip6_tnl_parm p1;
        struct ip6_tnl *t = NULL;
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1274,11 +1310,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                err = -EFAULT;
                                break;
                        }
-                       t = ip6_tnl_locate(net, &p, 0);
+                       ip6_tnl_parm_from_user(&p1, &p);
+                       t = ip6_tnl_locate(net, &p1, 0);
+               } else {
+                       memset(&p, 0, sizeof(p));
                }
                if (t == NULL)
                        t = netdev_priv(dev);
-               memcpy(&p, &t->parms, sizeof (p));
+               ip6_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
                        err = -EFAULT;
                }
@@ -1295,7 +1334,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
                    p.proto != 0)
                        break;
-               t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
+               ip6_tnl_parm_from_user(&p1, &p);
+               t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
                if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
@@ -1307,13 +1347,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
                        ip6_tnl_unlink(ip6n, t);
                        synchronize_net();
-                       err = ip6_tnl_change(t, &p);
+                       err = ip6_tnl_change(t, &p1);
                        ip6_tnl_link(ip6n, t);
                        netdev_state_change(dev);
                }
                if (t) {
                        err = 0;
-                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
+                       ip6_tnl_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
 
                } else
@@ -1329,7 +1370,9 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                                break;
                        err = -ENOENT;
-                       if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
+                       ip6_tnl_parm_from_user(&p1, &p);
+                       t = ip6_tnl_locate(net, &p1, 0);
+                       if (t == NULL)
                                break;
                        err = -EPERM;
                        if (t->dev == ip6n->fb_tnl_dev)
index 4532973f0dd4fc8fd3467a81cc0a2ce0581c72f1..08ea3f0b6e55f9557ec1e919e77f1496ccb1fdf1 100644 (file)
@@ -838,7 +838,7 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else
                        kfree_skb(skb);
        }
@@ -1052,7 +1052,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
                                skb_trim(skb, nlh->nlmsg_len);
                                ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
                        }
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else
                        ip6_mr_forward(net, mrt, skb, c);
        }
@@ -2202,12 +2202,12 @@ int ip6mr_get_route(struct net *net,
 }
 
 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
-                            u32 pid, u32 seq, struct mfc6_cache *c)
+                            u32 portid, u32 seq, struct mfc6_cache *c)
 {
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
 
-       nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2260,7 +2260,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
                                if (e < s_e)
                                        goto next_entry;
                                if (ip6mr_fill_mroute(mrt, skb,
-                                                     NETLINK_CB(cb->skb).pid,
+                                                     NETLINK_CB(cb->skb).portid,
                                                      cb->nlh->nlmsg_seq,
                                                      mfc) < 0)
                                        goto done;
index db31561cc8df31afbf7f18fd4dd849811be5a8dd..429089cb073dd1ae4f565a6b5382d6b12b9354ce 100644 (file)
@@ -15,6 +15,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
        const struct ipv6hdr *iph = ipv6_hdr(skb);
+       unsigned int hh_len;
        struct dst_entry *dst;
        struct flowi6 fl6 = {
                .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -47,6 +48,13 @@ int ip6_route_me_harder(struct sk_buff *skb)
        }
 #endif
 
+       /* Change in oif may mean change in hh_len. */
+       hh_len = skb_dst(skb)->dev->hard_header_len;
+       if (skb_headroom(skb) < hh_len &&
+           pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                            0, GFP_ATOMIC))
+               return -1;
+
        return 0;
 }
 EXPORT_SYMBOL(ip6_route_me_harder);
index 10135342799e293bfa62e87ac01f7e1ec81c8c65..c72532a60d887aeaaf31187f470622de24aabda8 100644 (file)
@@ -181,9 +181,44 @@ config IP6_NF_SECURITY
        help
          This option adds a `security' table to iptables, for use
          with Mandatory Access Control (MAC) policy.
-        
+
          If unsure, say N.
 
+config NF_NAT_IPV6
+       tristate "IPv6 NAT"
+       depends on NF_CONNTRACK_IPV6
+       depends on NETFILTER_ADVANCED
+       select NF_NAT
+       help
+         The IPv6 NAT option allows masquerading, port forwarding and other
+         forms of full Network Address Port Translation. It is controlled by
+         the `nat' table in ip6tables, see the man page for ip6tables(8).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+if NF_NAT_IPV6
+
+config IP6_NF_TARGET_MASQUERADE
+       tristate "MASQUERADE target support"
+       help
+         Masquerading is a special case of NAT: all outgoing connections are
+         changed to seem to come from a particular interface's address, and
+         if the interface goes down, those connections are lost.  This is
+         only useful for dialup accounts with dynamic IP address (ie. your IP
+         address will be different on next dialup).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP6_NF_TARGET_NPT
+       tristate "NPT (Network Prefix translation) target support"
+       help
+         This option adds the `SNPT' and `DNPT' target, which perform
+         stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+endif # NF_NAT_IPV6
+
 endif # IP6_NF_IPTABLES
 
 endmenu
index 534d3f216f7b4fa14475e438cf3ebf2fbd4d1552..2d11fcc2cf3c5c3906b851e9c5daad69de26e0ed 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
 obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
+obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
@@ -15,6 +16,9 @@ nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
+nf_nat_ipv6-y          := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
+obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
+
 # defrag
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
@@ -30,4 +34,6 @@ obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 
 # targets
+obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
+obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o
 obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
new file mode 100644 (file)
index 0000000..60e9053
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+
+static unsigned int
+masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       enum ip_conntrack_info ctinfo;
+       struct in6_addr src;
+       struct nf_conn *ct;
+       struct nf_nat_range newrange;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+                           ctinfo == IP_CT_RELATED_REPLY));
+
+       if (ipv6_dev_get_saddr(dev_net(par->out), par->out,
+                              &ipv6_hdr(skb)->daddr, 0, &src) < 0)
+               return NF_DROP;
+
+       nfct_nat(ct)->masq_index = par->out->ifindex;
+
+       newrange.flags          = range->flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.in6   = src;
+       newrange.max_addr.in6   = src;
+       newrange.min_proto      = range->min_proto;
+       newrange.max_proto      = range->max_proto;
+
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+}
+
+static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+
+       if (range->flags & NF_NAT_RANGE_MAP_IPS)
+               return -EINVAL;
+       return 0;
+}
+
+static int device_cmp(struct nf_conn *ct, void *ifindex)
+{
+       const struct nf_conn_nat *nat = nfct_nat(ct);
+
+       if (!nat)
+               return 0;
+       if (nf_ct_l3num(ct) != NFPROTO_IPV6)
+               return 0;
+       return nat->masq_index == (int)(long)ifindex;
+}
+
+static int masq_device_event(struct notifier_block *this,
+                            unsigned long event, void *ptr)
+{
+       const struct net_device *dev = ptr;
+       struct net *net = dev_net(dev);
+
+       if (event == NETDEV_DOWN)
+               nf_ct_iterate_cleanup(net, device_cmp,
+                                     (void *)(long)dev->ifindex);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block masq_dev_notifier = {
+       .notifier_call  = masq_device_event,
+};
+
+static int masq_inet_event(struct notifier_block *this,
+                          unsigned long event, void *ptr)
+{
+       struct inet6_ifaddr *ifa = ptr;
+
+       return masq_device_event(this, event, ifa->idev->dev);
+}
+
+static struct notifier_block masq_inet_notifier = {
+       .notifier_call  = masq_inet_event,
+};
+
+static struct xt_target masquerade_tg6_reg __read_mostly = {
+       .name           = "MASQUERADE",
+       .family         = NFPROTO_IPV6,
+       .checkentry     = masquerade_tg6_checkentry,
+       .target         = masquerade_tg6,
+       .targetsize     = sizeof(struct nf_nat_range),
+       .table          = "nat",
+       .hooks          = 1 << NF_INET_POST_ROUTING,
+       .me             = THIS_MODULE,
+};
+
+static int __init masquerade_tg6_init(void)
+{
+       int err;
+
+       err = xt_register_target(&masquerade_tg6_reg);
+       if (err == 0) {
+               register_netdevice_notifier(&masq_dev_notifier);
+               register_inet6addr_notifier(&masq_inet_notifier);
+       }
+
+       return err;
+}
+static void __exit masquerade_tg6_exit(void)
+{
+       unregister_inet6addr_notifier(&masq_inet_notifier);
+       unregister_netdevice_notifier(&masq_dev_notifier);
+       xt_unregister_target(&masquerade_tg6_reg);
+}
+
+module_init(masquerade_tg6_init);
+module_exit(masquerade_tg6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Xtables: automatic address SNAT");
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
new file mode 100644 (file)
index 0000000..e948691
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011, 2012 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6t_NPT.h>
+#include <linux/netfilter/x_tables.h>
+
+static __sum16 csum16_complement(__sum16 a)
+{
+       return (__force __sum16)(0xffff - (__force u16)a);
+}
+
+static __sum16 csum16_add(__sum16 a, __sum16 b)
+{
+       u16 sum;
+
+       sum = (__force u16)a + (__force u16)b;
+       sum += (__force u16)a < (__force u16)b;
+       return (__force __sum16)sum;
+}
+
+static __sum16 csum16_sub(__sum16 a, __sum16 b)
+{
+       return csum16_add(a, csum16_complement(b));
+}
+
+static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
+{
+       struct ip6t_npt_tginfo *npt = par->targinfo;
+       __sum16 src_sum = 0, dst_sum = 0;
+       unsigned int i;
+
+       if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
+               src_sum = csum16_add(src_sum,
+                               (__force __sum16)npt->src_pfx.in6.s6_addr16[i]);
+               dst_sum = csum16_add(dst_sum,
+                               (__force __sum16)npt->dst_pfx.in6.s6_addr16[i]);
+       }
+
+       npt->adjustment = csum16_sub(src_sum, dst_sum);
+       return 0;
+}
+
+static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
+                            struct in6_addr *addr)
+{
+       unsigned int pfx_len;
+       unsigned int i, idx;
+       __be32 mask;
+       __sum16 sum;
+
+       pfx_len = max(npt->src_pfx_len, npt->dst_pfx_len);
+       for (i = 0; i < pfx_len; i += 32) {
+               if (pfx_len - i >= 32)
+                       mask = 0;
+               else
+                       mask = htonl(~((1 << (pfx_len - i)) - 1));
+
+               idx = i / 32;
+               addr->s6_addr32[idx] &= mask;
+               addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx];
+       }
+
+       if (pfx_len <= 48)
+               idx = 3;
+       else {
+               for (idx = 4; idx < ARRAY_SIZE(addr->s6_addr16); idx++) {
+                       if ((__force __sum16)addr->s6_addr16[idx] !=
+                           CSUM_MANGLED_0)
+                               break;
+               }
+               if (idx == ARRAY_SIZE(addr->s6_addr16))
+                       return false;
+       }
+
+       sum = csum16_add((__force __sum16)addr->s6_addr16[idx],
+                        npt->adjustment);
+       if (sum == CSUM_MANGLED_0)
+               sum = 0;
+       *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
+
+       return true;
+}
+
+static unsigned int
+ip6t_snpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct ip6t_npt_tginfo *npt = par->targinfo;
+
+       if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->saddr)) {
+               icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
+                           offsetof(struct ipv6hdr, saddr));
+               return NF_DROP;
+       }
+       return XT_CONTINUE;
+}
+
+static unsigned int
+ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct ip6t_npt_tginfo *npt = par->targinfo;
+
+       if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->daddr)) {
+               icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
+                           offsetof(struct ipv6hdr, daddr));
+               return NF_DROP;
+       }
+       return XT_CONTINUE;
+}
+
+static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
+       {
+               .name           = "SNPT",
+               .target         = ip6t_snpt_tg,
+               .targetsize     = sizeof(struct ip6t_npt_tginfo),
+               .checkentry     = ip6t_npt_checkentry,
+               .family         = NFPROTO_IPV6,
+               .hooks          = (1 << NF_INET_LOCAL_IN) |
+                                 (1 << NF_INET_POST_ROUTING),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "DNPT",
+               .target         = ip6t_dnpt_tg,
+               .targetsize     = sizeof(struct ip6t_npt_tginfo),
+               .checkentry     = ip6t_npt_checkentry,
+               .family         = NFPROTO_IPV6,
+               .hooks          = (1 << NF_INET_PRE_ROUTING) |
+                                 (1 << NF_INET_LOCAL_OUT),
+               .me             = THIS_MODULE,
+       },
+};
+
+static int __init ip6t_npt_init(void)
+{
+       return xt_register_targets(ip6t_npt_target_reg,
+                                  ARRAY_SIZE(ip6t_npt_target_reg));
+}
+
+static void __exit ip6t_npt_exit(void)
+{
+       xt_unregister_targets(ip6t_npt_target_reg,
+                             ARRAY_SIZE(ip6t_npt_target_reg));
+}
+
+module_init(ip6t_npt_init);
+module_exit(ip6t_npt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6-to-IPv6 Network Prefix Translation (RFC 6296)");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS("ip6t_SNPT");
+MODULE_ALIAS("ip6t_DNPT");
index 325e59a0224ffa3f0b08e7474628d614e2d3ee49..beb5777d20437321cf19d37ad3e7c630d8f0663c 100644 (file)
@@ -61,9 +61,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
        net->ipv6.ip6table_filter =
                ip6t_register_table(net, &packet_filter, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_filter))
-               return PTR_ERR(net->ipv6.ip6table_filter);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_filter);
 }
 
 static void __net_exit ip6table_filter_net_exit(struct net *net)
index 4d782405f125da5e7819333ce7dfadd501044c12..7431121b87dee6fa628f53d37dfa7f93c546a60b 100644 (file)
@@ -97,9 +97,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
        net->ipv6.ip6table_mangle =
                ip6t_register_table(net, &packet_mangler, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_mangle))
-               return PTR_ERR(net->ipv6.ip6table_mangle);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_mangle);
 }
 
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
new file mode 100644 (file)
index 0000000..e418bd6
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 NAT code. Development of IPv6 NAT
+ * funded by Astaro.
+ */
+
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+
+static const struct xt_table nf_nat_ipv6_table = {
+       .name           = "nat",
+       .valid_hooks    = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .me             = THIS_MODULE,
+       .af             = NFPROTO_IPV6,
+};
+
+static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+       /* Force range to this IP; let proto decide mapping for
+        * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+        */
+       struct nf_nat_range range;
+
+       range.flags = 0;
+       pr_debug("Allocating NULL binding for %p (%pI6)\n", ct,
+                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6 :
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6);
+
+       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+
+static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
+                                    const struct net_device *in,
+                                    const struct net_device *out,
+                                    struct nf_conn *ct)
+{
+       struct net *net = nf_ct_net(ct);
+       unsigned int ret;
+
+       ret = ip6t_do_table(skb, hooknum, in, out, net->ipv6.ip6table_nat);
+       if (ret == NF_ACCEPT) {
+               if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
+                       ret = alloc_null_binding(ct, hooknum);
+       }
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_fn(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn_nat *nat;
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+       __be16 frag_off;
+       int hdrlen;
+       u8 nexthdr;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       /* Can't track?  It's not due to stress, or conntrack would
+        * have dropped it.  Hence it's the user's responsibility to
+        * packet filter it out, or implement conntrack/NAT for that
+        * protocol. 8) --RR
+        */
+       if (!ct)
+               return NF_ACCEPT;
+
+       /* Don't try to NAT if this packet is not conntracked */
+       if (nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       nat = nfct_nat(ct);
+       if (!nat) {
+               /* NAT module was loaded late. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL) {
+                       pr_debug("failed to add NAT extension\n");
+                       return NF_ACCEPT;
+               }
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED_REPLY:
+               nexthdr = ipv6_hdr(skb)->nexthdr;
+               hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+                                         &nexthdr, &frag_off);
+
+               if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+                       if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+                                                            hooknum, hdrlen))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
+       case IP_CT_NEW:
+               /* Seen it before?  This can happen for loopback, retrans,
+                * or local packets.
+                */
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       unsigned int ret;
+
+                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               } else
+                       pr_debug("Already setup manip %s for ct %p\n",
+                                maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
+                                ct);
+               break;
+
+       default:
+               /* ESTABLISHED */
+               NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
+                            ctinfo == IP_CT_ESTABLISHED_REPLY);
+       }
+
+       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+}
+
+static unsigned int
+nf_nat_ipv6_in(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+
+       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+               skb_dst_drop(skb);
+
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_out(unsigned int hooknum,
+               struct sk_buff *skb,
+               const struct net_device *in,
+               const struct net_device *out,
+               int (*okfn)(struct sk_buff *))
+{
+#ifdef CONFIG_XFRM
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+#endif
+       unsigned int ret;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct ipv6hdr))
+               return NF_ACCEPT;
+
+       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                                     &ct->tuplehash[!dir].tuple.dst.u3) ||
+                   (ct->tuplehash[dir].tuple.src.u.all !=
+                    ct->tuplehash[!dir].tuple.dst.u.all))
+                       if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
+                               ret = NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_local_fn(unsigned int hooknum,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out,
+                    int (*okfn)(struct sk_buff *))
+{
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned int ret;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct ipv6hdr))
+               return NF_ACCEPT;
+
+       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+                                     &ct->tuplehash[!dir].tuple.src.u3)) {
+                       if (ip6_route_me_harder(skb))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+                        ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET6))
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
+       /* Before packet filtering, change destination */
+       {
+               .hook           = nf_nat_ipv6_in,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_PRE_ROUTING,
+               .priority       = NF_IP6_PRI_NAT_DST,
+       },
+       /* After packet filtering, change source */
+       {
+               .hook           = nf_nat_ipv6_out,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_POST_ROUTING,
+               .priority       = NF_IP6_PRI_NAT_SRC,
+       },
+       /* Before packet filtering, change destination */
+       {
+               .hook           = nf_nat_ipv6_local_fn,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_LOCAL_OUT,
+               .priority       = NF_IP6_PRI_NAT_DST,
+       },
+       /* After packet filtering, change source */
+       {
+               .hook           = nf_nat_ipv6_fn,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_LOCAL_IN,
+               .priority       = NF_IP6_PRI_NAT_SRC,
+       },
+};
+
+static int __net_init ip6table_nat_net_init(struct net *net)
+{
+       struct ip6t_replace *repl;
+
+       repl = ip6t_alloc_initial_table(&nf_nat_ipv6_table);
+       if (repl == NULL)
+               return -ENOMEM;
+       net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl);
+       kfree(repl);
+       if (IS_ERR(net->ipv6.ip6table_nat))
+               return PTR_ERR(net->ipv6.ip6table_nat);
+       return 0;
+}
+
+static void __net_exit ip6table_nat_net_exit(struct net *net)
+{
+       ip6t_unregister_table(net, net->ipv6.ip6table_nat);
+}
+
+static struct pernet_operations ip6table_nat_net_ops = {
+       .init   = ip6table_nat_net_init,
+       .exit   = ip6table_nat_net_exit,
+};
+
+static int __init ip6table_nat_init(void)
+{
+       int err;
+
+       err = register_pernet_subsys(&ip6table_nat_net_ops);
+       if (err < 0)
+               goto err1;
+
+       err = nf_register_hooks(nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       unregister_pernet_subsys(&ip6table_nat_net_ops);
+err1:
+       return err;
+}
+
+static void __exit ip6table_nat_exit(void)
+{
+       nf_unregister_hooks(nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+       unregister_pernet_subsys(&ip6table_nat_net_ops);
+}
+
+module_init(ip6table_nat_init);
+module_exit(ip6table_nat_exit);
+
+MODULE_LICENSE("GPL");
index 5b9926a011bd99faff714042746f161ee58a152a..60d1bddff7a038c54c518e850c64f08745accccb 100644 (file)
@@ -40,9 +40,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
        net->ipv6.ip6table_raw =
                ip6t_register_table(net, &packet_raw, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_raw))
-               return PTR_ERR(net->ipv6.ip6table_raw);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_raw);
 }
 
 static void __net_exit ip6table_raw_net_exit(struct net *net)
index 91aa2b4d83c9c1571d4538ad380dfae5296a180b..db155351339c7c63ed48d23233ef47919192a05c 100644 (file)
@@ -58,10 +58,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
        net->ipv6.ip6table_security =
                ip6t_register_table(net, &security_table, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_security))
-               return PTR_ERR(net->ipv6.ip6table_security);
-
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_security);
 }
 
 static void __net_exit ip6table_security_net_exit(struct net *net)
index 4794f96cf2e01bfcd5cc858cc22f73b325dbc907..8860d23e61cfff646e3e034da7fe4c985fbb5575 100644 (file)
@@ -28,6 +28,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#include <net/netfilter/nf_nat_helper.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 #include <net/netfilter/nf_log.h>
 
@@ -64,82 +65,31 @@ static int ipv6_print_tuple(struct seq_file *s,
                          tuple->src.u3.ip6, tuple->dst.u3.ip6);
 }
 
-/*
- * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
- *
- * This function parses (probably truncated) exthdr set "hdr"
- * of length "len". "nexthdrp" initially points to some place,
- * where type of the first header can be found.
- *
- * It skips all well-known exthdrs, and returns pointer to the start
- * of unparsable area i.e. the first header with unknown type.
- * if success, *nexthdr is updated by type/protocol of this header.
- *
- * NOTES: - it may return pointer pointing beyond end of packet,
- *          if the last recognized header is truncated in the middle.
- *        - if packet is truncated, so that all parsed headers are skipped,
- *          it returns -1.
- *        - if packet is fragmented, return pointer of the fragment header.
- *        - ESP is unparsable for now and considered like
- *          normal payload protocol.
- *        - Note also special handling of AUTH header. Thanks to IPsec wizards.
- */
-
-static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
-                                 u8 *nexthdrp, int len)
-{
-       u8 nexthdr = *nexthdrp;
-
-       while (ipv6_ext_hdr(nexthdr)) {
-               struct ipv6_opt_hdr hdr;
-               int hdrlen;
-
-               if (len < (int)sizeof(struct ipv6_opt_hdr))
-                       return -1;
-               if (nexthdr == NEXTHDR_NONE)
-                       break;
-               if (nexthdr == NEXTHDR_FRAGMENT)
-                       break;
-               if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
-                       BUG();
-               if (nexthdr == NEXTHDR_AUTH)
-                       hdrlen = (hdr.hdrlen+2)<<2;
-               else
-                       hdrlen = ipv6_optlen(&hdr);
-
-               nexthdr = hdr.nexthdr;
-               len -= hdrlen;
-               start += hdrlen;
-       }
-
-       *nexthdrp = nexthdr;
-       return start;
-}
-
 static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
                            unsigned int *dataoff, u_int8_t *protonum)
 {
        unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
-       unsigned char pnum;
+       __be16 frag_off;
        int protoff;
+       u8 nexthdr;
 
        if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
-                         &pnum, sizeof(pnum)) != 0) {
+                         &nexthdr, sizeof(nexthdr)) != 0) {
                pr_debug("ip6_conntrack_core: can't get nexthdr\n");
                return -NF_ACCEPT;
        }
-       protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, skb->len - extoff);
+       protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
        /*
         * (protoff == skb->len) mean that the packet doesn't have no data
         * except of IPv6 & ext headers. but it's tracked anyway. - YK
         */
-       if ((protoff < 0) || (protoff > skb->len)) {
+       if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
                pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
                return -NF_ACCEPT;
        }
 
        *dataoff = protoff;
-       *protonum = pnum;
+       *protonum = nexthdr;
        return NF_ACCEPT;
 }
 
@@ -153,10 +103,10 @@ static unsigned int ipv6_helper(unsigned int hooknum,
        const struct nf_conn_help *help;
        const struct nf_conntrack_helper *helper;
        enum ip_conntrack_info ctinfo;
-       unsigned int ret, protoff;
-       unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
-       unsigned char pnum = ipv6_hdr(skb)->nexthdr;
-
+       unsigned int ret;
+       __be16 frag_off;
+       int protoff;
+       u8 nexthdr;
 
        /* This is where we call the helper: as the packet goes out. */
        ct = nf_ct_get(skb, &ctinfo);
@@ -171,9 +121,10 @@ static unsigned int ipv6_helper(unsigned int hooknum,
        if (!helper)
                return NF_ACCEPT;
 
-       protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
-                                        skb->len - extoff);
-       if (protoff > skb->len || pnum == NEXTHDR_FRAGMENT) {
+       nexthdr = ipv6_hdr(skb)->nexthdr;
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+                                  &frag_off);
+       if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
                pr_debug("proto header not found\n");
                return NF_ACCEPT;
        }
@@ -192,6 +143,36 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
                                 const struct net_device *out,
                                 int (*okfn)(struct sk_buff *))
 {
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned char pnum = ipv6_hdr(skb)->nexthdr;
+       int protoff;
+       __be16 frag_off;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+               goto out;
+
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+                                  &frag_off);
+       if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+               pr_debug("proto header not found\n");
+               goto out;
+       }
+
+       /* adjust seqs for loopback traffic only in outgoing direction */
+       if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+           !nf_is_loopback_packet(skb)) {
+               typeof(nf_nat_seq_adjust_hook) seq_adjust;
+
+               seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
+               if (!seq_adjust ||
+                   !seq_adjust(skb, ct, ctinfo, protoff)) {
+                       NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+                       return NF_DROP;
+               }
+       }
+out:
        /* We've seen it coming out the other side: confirm it */
        return nf_conntrack_confirm(skb);
 }
@@ -199,9 +180,14 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
 static unsigned int __ipv6_conntrack_in(struct net *net,
                                        unsigned int hooknum,
                                        struct sk_buff *skb,
+                                       const struct net_device *in,
+                                       const struct net_device *out,
                                        int (*okfn)(struct sk_buff *))
 {
        struct sk_buff *reasm = skb->nfct_reasm;
+       const struct nf_conn_help *help;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
 
        /* This packet is fragmented and has reassembled packet. */
        if (reasm) {
@@ -213,6 +199,25 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
                        if (ret != NF_ACCEPT)
                                return ret;
                }
+
+               /* Conntrack helpers need the entire reassembled packet in the
+                * POST_ROUTING hook. In case of unconfirmed connections NAT
+                * might reassign a helper, so the entire packet is also
+                * required.
+                */
+               ct = nf_ct_get(reasm, &ctinfo);
+               if (ct != NULL && !nf_ct_is_untracked(ct)) {
+                       help = nfct_help(ct);
+                       if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
+                               nf_conntrack_get_reasm(skb);
+                               NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
+                                              (struct net_device *)in,
+                                              (struct net_device *)out,
+                                              okfn, NF_IP6_PRI_CONNTRACK + 1);
+                               return NF_DROP_ERR(-ECANCELED);
+                       }
+               }
+
                nf_conntrack_get(reasm->nfct);
                skb->nfct = reasm->nfct;
                skb->nfctinfo = reasm->nfctinfo;
@@ -228,7 +233,7 @@ static unsigned int ipv6_conntrack_in(unsigned int hooknum,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
 {
-       return __ipv6_conntrack_in(dev_net(in), hooknum, skb, okfn);
+       return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
 }
 
 static unsigned int ipv6_conntrack_local(unsigned int hooknum,
@@ -242,7 +247,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
+       return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
index c9c78c2e666b86b397d95756695afe49a6a47b5a..18bd9bbbd1c6c0f50d8bf74c947c05f91a4f20cd 100644 (file)
@@ -57,41 +57,27 @@ struct nf_ct_frag6_skb_cb
 
 #define NFCT_FRAG6_CB(skb)     ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
 
-struct nf_ct_frag6_queue
-{
-       struct inet_frag_queue  q;
-
-       __be32                  id;             /* fragment id          */
-       u32                     user;
-       struct in6_addr         saddr;
-       struct in6_addr         daddr;
-
-       unsigned int            csum;
-       __u16                   nhoffset;
-};
-
 static struct inet_frags nf_frags;
-static struct netns_frags nf_init_frags;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_frag6_timeout",
-               .data           = &nf_init_frags.timeout,
+               .data           = &init_net.nf_frag.frags.timeout,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "nf_conntrack_frag6_low_thresh",
-               .data           = &nf_init_frags.low_thresh,
+               .data           = &init_net.nf_frag.frags.low_thresh,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "nf_conntrack_frag6_high_thresh",
-               .data           = &nf_init_frags.high_thresh,
+               .data           = &init_net.nf_frag.frags.high_thresh,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
@@ -99,68 +85,86 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        { }
 };
 
-static struct ctl_table_header *nf_ct_frag6_sysctl_header;
-#endif
-
-static unsigned int nf_hashfn(struct inet_frag_queue *q)
+static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
 {
-       const struct nf_ct_frag6_queue *nq;
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+
+       table = nf_ct_frag6_sysctl_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
+                               GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+
+               table[0].data = &net->ipv6.frags.high_thresh;
+               table[1].data = &net->ipv6.frags.low_thresh;
+               table[2].data = &net->ipv6.frags.timeout;
+       }
 
-       nq = container_of(q, struct nf_ct_frag6_queue, q);
-       return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
+       hdr = register_net_sysctl(net, "net/netfilter", table);
+       if (hdr == NULL)
+               goto err_reg;
+
+       net->nf_frag.sysctl.frags_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
 }
 
-static void nf_skb_free(struct sk_buff *skb)
+static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
-       if (NFCT_FRAG6_CB(skb)->orig)
-               kfree_skb(NFCT_FRAG6_CB(skb)->orig);
-}
+       struct ctl_table *table;
 
-/* Destruction primitives. */
+       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+}
 
-static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
+#else
+static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
 {
-       inet_frag_put(&fq->q, &nf_frags);
+       return 0;
 }
+static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
+{
+}
+#endif
 
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
+static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
-       inet_frag_kill(&fq->q, &nf_frags);
+       const struct frag_queue *nq;
+
+       nq = container_of(q, struct frag_queue, q);
+       return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
 }
 
-static void nf_ct_frag6_evictor(void)
+static void nf_skb_free(struct sk_buff *skb)
 {
-       local_bh_disable();
-       inet_frag_evictor(&nf_init_frags, &nf_frags);
-       local_bh_enable();
+       if (NFCT_FRAG6_CB(skb)->orig)
+               kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 }
 
 static void nf_ct_frag6_expire(unsigned long data)
 {
-       struct nf_ct_frag6_queue *fq;
-
-       fq = container_of((struct inet_frag_queue *)data,
-                       struct nf_ct_frag6_queue, q);
-
-       spin_lock(&fq->q.lock);
+       struct frag_queue *fq;
+       struct net *net;
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE)
-               goto out;
+       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+       net = container_of(fq->q.net, struct net, nf_frag.frags);
 
-       fq_kill(fq);
-
-out:
-       spin_unlock(&fq->q.lock);
-       fq_put(fq);
+       ip6_expire_frag_queue(net, fq, &nf_frags);
 }
 
 /* Creation primitives. */
-
-static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+                                        u32 user, struct in6_addr *src,
+                                        struct in6_addr *dst)
 {
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
@@ -174,22 +178,23 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
        read_lock_bh(&nf_frags.lock);
        hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
 
-       q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+       q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
        local_bh_enable();
        if (q == NULL)
                goto oom;
 
-       return container_of(q, struct nf_ct_frag6_queue, q);
+       return container_of(q, struct frag_queue, q);
 
 oom:
        return NULL;
 }
 
 
-static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
+static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
                             const struct frag_hdr *fhdr, int nhoff)
 {
        struct sk_buff *prev, *next;
+       unsigned int payload_len;
        int offset, end;
 
        if (fq->q.last_in & INET_FRAG_COMPLETE) {
@@ -197,8 +202,10 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
                goto err;
        }
 
+       payload_len = ntohs(ipv6_hdr(skb)->payload_len);
+
        offset = ntohs(fhdr->frag_off) & ~0x7;
-       end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
+       end = offset + (payload_len -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
        if ((unsigned int)end > IPV6_MAXPLEN) {
@@ -307,7 +314,9 @@ found:
        skb->dev = NULL;
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
-       atomic_add(skb->truesize, &nf_init_frags.mem);
+       if (payload_len > fq->q.max_size)
+               fq->q.max_size = payload_len;
+       atomic_add(skb->truesize, &fq->q.net->mem);
 
        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
@@ -317,12 +326,12 @@ found:
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }
        write_lock(&nf_frags.lock);
-       list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list);
+       list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
        write_unlock(&nf_frags.lock);
        return 0;
 
 discard_fq:
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &nf_frags);
 err:
        return -1;
 }
@@ -337,12 +346,12 @@ err:
  *     the last and the first frames arrived and all the bits are here.
  */
 static struct sk_buff *
-nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 {
        struct sk_buff *fp, *op, *head = fq->q.fragments;
        int    payload_len;
 
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &nf_frags);
 
        WARN_ON(head == NULL);
        WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -386,7 +395,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
                clone->ip_summed = head->ip_summed;
 
                NFCT_FRAG6_CB(clone)->orig = NULL;
-               atomic_add(clone->truesize, &nf_init_frags.mem);
+               atomic_add(clone->truesize, &fq->q.net->mem);
        }
 
        /* We have to remove fragment header from datagram and to relocate
@@ -410,12 +419,14 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
        }
-       atomic_sub(head->truesize, &nf_init_frags.mem);
+       atomic_sub(head->truesize, &fq->q.net->mem);
 
+       head->local_df = 1;
        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
+       IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
 
        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -520,8 +531,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 {
        struct sk_buff *clone;
        struct net_device *dev = skb->dev;
+       struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
+                                      : dev_net(skb->dev);
        struct frag_hdr *fhdr;
-       struct nf_ct_frag6_queue *fq;
+       struct frag_queue *fq;
        struct ipv6hdr *hdr;
        int fhoff, nhoff;
        u8 prevhdr;
@@ -553,10 +566,11 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        hdr = ipv6_hdr(clone);
        fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
-       if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
-               nf_ct_frag6_evictor();
+       local_bh_disable();
+       inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false);
+       local_bh_enable();
 
-       fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
+       fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr);
        if (fq == NULL) {
                pr_debug("Can't find and can't create new queue\n");
                goto ret_orig;
@@ -567,7 +581,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
                spin_unlock_bh(&fq->q.lock);
                pr_debug("Can't insert skb to queue\n");
-               fq_put(fq);
+               inet_frag_put(&fq->q, &nf_frags);
                goto ret_orig;
        }
 
@@ -579,7 +593,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        }
        spin_unlock_bh(&fq->q.lock);
 
-       fq_put(fq);
+       inet_frag_put(&fq->q, &nf_frags);
        return ret_skb;
 
 ret_orig:
@@ -592,6 +606,7 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
                        int (*okfn)(struct sk_buff *))
 {
        struct sk_buff *s, *s2;
+       unsigned int ret = 0;
 
        for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
                nf_conntrack_put_reasm(s->nfct_reasm);
@@ -601,49 +616,62 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
                s2 = s->next;
                s->next = NULL;
 
-               NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn,
-                              NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+               if (ret != -ECANCELED)
+                       ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
+                                            in, out, okfn,
+                                            NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+               else
+                       kfree_skb(s);
+
                s = s2;
        }
        nf_conntrack_put_reasm(skb);
 }
 
+static int nf_ct_net_init(struct net *net)
+{
+       net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+       net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+       net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
+       inet_frags_init_net(&net->nf_frag.frags);
+
+       return nf_ct_frag6_sysctl_register(net);
+}
+
+static void nf_ct_net_exit(struct net *net)
+{
+       nf_ct_frags6_sysctl_unregister(net);
+       inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
+}
+
+static struct pernet_operations nf_ct_net_ops = {
+       .init = nf_ct_net_init,
+       .exit = nf_ct_net_exit,
+};
+
 int nf_ct_frag6_init(void)
 {
+       int ret = 0;
+
        nf_frags.hashfn = nf_hashfn;
        nf_frags.constructor = ip6_frag_init;
        nf_frags.destructor = NULL;
        nf_frags.skb_free = nf_skb_free;
-       nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
+       nf_frags.qsize = sizeof(struct frag_queue);
        nf_frags.match = ip6_frag_match;
        nf_frags.frag_expire = nf_ct_frag6_expire;
        nf_frags.secret_interval = 10 * 60 * HZ;
-       nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
-       nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
-       nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
-       inet_frags_init_net(&nf_init_frags);
        inet_frags_init(&nf_frags);
 
-#ifdef CONFIG_SYSCTL
-       nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
-                                                       nf_ct_frag6_sysctl_table);
-       if (!nf_ct_frag6_sysctl_header) {
+       ret = register_pernet_subsys(&nf_ct_net_ops);
+       if (ret)
                inet_frags_fini(&nf_frags);
-               return -ENOMEM;
-       }
-#endif
 
-       return 0;
+       return ret;
 }
 
 void nf_ct_frag6_cleanup(void)
 {
-#ifdef CONFIG_SYSCTL
-       unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
-       nf_ct_frag6_sysctl_header = NULL;
-#endif
+       unregister_pernet_subsys(&nf_ct_net_ops);
        inet_frags_fini(&nf_frags);
-
-       nf_init_frags.low_thresh = 0;
-       nf_ct_frag6_evictor();
 }
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
new file mode 100644 (file)
index 0000000..abfe75a
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of IPv6 NAT funded by Astaro.
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/secure_seq.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv6;
+
+#ifdef CONFIG_XFRM
+static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
+                                      const struct nf_conn *ct,
+                                      enum ip_conntrack_dir dir,
+                                      unsigned long statusbit,
+                                      struct flowi *fl)
+{
+       const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
+       struct flowi6 *fl6 = &fl->u.ip6;
+
+       if (ct->status & statusbit) {
+               fl6->daddr = t->dst.u3.in6;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl6->fl6_dport = t->dst.u.all;
+       }
+
+       statusbit ^= IPS_NAT_MASK;
+
+       if (ct->status & statusbit) {
+               fl6->saddr = t->src.u3.in6;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl6->fl6_sport = t->src.u.all;
+       }
+}
+#endif
+
+static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t,
+                                const struct nf_nat_range *range)
+{
+       return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
+              ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
+}
+
+static u32 nf_nat_ipv6_secure_port(const struct nf_conntrack_tuple *t,
+                                  __be16 dport)
+{
+       return secure_ipv6_port_ephemeral(t->src.u3.ip6, t->dst.u3.ip6, dport);
+}
+
+static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
+                                 unsigned int iphdroff,
+                                 const struct nf_nat_l4proto *l4proto,
+                                 const struct nf_conntrack_tuple *target,
+                                 enum nf_nat_manip_type maniptype)
+{
+       struct ipv6hdr *ipv6h;
+       __be16 frag_off;
+       int hdroff;
+       u8 nexthdr;
+
+       if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
+               return false;
+
+       ipv6h = (void *)skb->data + iphdroff;
+       nexthdr = ipv6h->nexthdr;
+       hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
+                                 &nexthdr, &frag_off);
+       if (hdroff < 0)
+               goto manip_addr;
+
+       if ((frag_off & htons(~0x7)) == 0 &&
+           !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
+                               target, maniptype))
+               return false;
+manip_addr:
+       if (maniptype == NF_NAT_MANIP_SRC)
+               ipv6h->saddr = target->src.u3.in6;
+       else
+               ipv6h->daddr = target->dst.u3.in6;
+
+       return true;
+}
+
+static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
+                                   unsigned int iphdroff, __sum16 *check,
+                                   const struct nf_conntrack_tuple *t,
+                                   enum nf_nat_manip_type maniptype)
+{
+       const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
+       const struct in6_addr *oldip, *newip;
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               oldip = &ipv6h->saddr;
+               newip = &t->src.u3.in6;
+       } else {
+               oldip = &ipv6h->daddr;
+               newip = &t->dst.u3.in6;
+       }
+       inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
+                                 newip->s6_addr32, 1);
+}
+
+static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
+                                   u8 proto, void *data, __sum16 *check,
+                                   int datalen, int oldlen)
+{
+       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               if (!(rt->rt6i_flags & RTF_LOCAL) &&
+                   (!skb->dev || skb->dev->features & NETIF_F_V6_CSUM)) {
+                       skb->ip_summed = CHECKSUM_PARTIAL;
+                       skb->csum_start = skb_headroom(skb) +
+                                         skb_network_offset(skb) +
+                                         (data - (void *)skb->data);
+                       skb->csum_offset = (void *)check - data;
+                       *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                                 datalen, proto, 0);
+               } else {
+                       *check = 0;
+                       *check = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                                datalen, proto,
+                                                csum_partial(data, datalen,
+                                                             0));
+                       if (proto == IPPROTO_UDP && !*check)
+                               *check = CSUM_MANGLED_0;
+               }
+       } else
+               inet_proto_csum_replace2(check, skb,
+                                        htons(oldlen), htons(datalen), 1);
+}
+
+static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
+                                      struct nf_nat_range *range)
+{
+       if (tb[CTA_NAT_V6_MINIP]) {
+               nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
+                          sizeof(struct in6_addr));
+               range->flags |= NF_NAT_RANGE_MAP_IPS;
+       }
+
+       if (tb[CTA_NAT_V6_MAXIP])
+               nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
+                          sizeof(struct in6_addr));
+       else
+               range->max_addr = range->min_addr;
+
+       return 0;
+}
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
+       .l3proto                = NFPROTO_IPV6,
+       .secure_port            = nf_nat_ipv6_secure_port,
+       .in_range               = nf_nat_ipv6_in_range,
+       .manip_pkt              = nf_nat_ipv6_manip_pkt,
+       .csum_update            = nf_nat_ipv6_csum_update,
+       .csum_recalc            = nf_nat_ipv6_csum_recalc,
+       .nlattr_to_range        = nf_nat_ipv6_nlattr_to_range,
+#ifdef CONFIG_XFRM
+       .decode_session = nf_nat_ipv6_decode_session,
+#endif
+};
+
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
+                                   struct nf_conn *ct,
+                                   enum ip_conntrack_info ctinfo,
+                                   unsigned int hooknum,
+                                   unsigned int hdrlen)
+{
+       struct {
+               struct icmp6hdr icmp6;
+               struct ipv6hdr  ip6;
+       } *inside;
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+       const struct nf_nat_l4proto *l4proto;
+       struct nf_conntrack_tuple target;
+       unsigned long statusbit;
+
+       NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
+
+       if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+               return 0;
+       if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
+               return 0;
+
+       inside = (void *)skb->data + hdrlen;
+       if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
+               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+                       return 0;
+               if (ct->status & IPS_NAT_MASK)
+                       return 0;
+       }
+
+       if (manip == NF_NAT_MANIP_SRC)
+               statusbit = IPS_SRC_NAT;
+       else
+               statusbit = IPS_DST_NAT;
+
+       /* Invert if this is reply direction */
+       if (dir == IP_CT_DIR_REPLY)
+               statusbit ^= IPS_NAT_MASK;
+
+       if (!(ct->status & statusbit))
+               return 1;
+
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, inside->ip6.nexthdr);
+       if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
+                                  l4proto, &ct->tuplehash[!dir].tuple, !manip))
+               return 0;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+               inside = (void *)skb->data + hdrlen;
+               inside->icmp6.icmp6_cksum = 0;
+               inside->icmp6.icmp6_cksum =
+                       csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                       skb->len - hdrlen, IPPROTO_ICMPV6,
+                                       csum_partial(&inside->icmp6,
+                                                    skb->len - hdrlen, 0));
+       }
+
+       nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, IPPROTO_ICMPV6);
+       if (!nf_nat_ipv6_manip_pkt(skb, 0, l4proto, &target, manip))
+               return 0;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
+
+static int __init nf_nat_l3proto_ipv6_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
+       if (err < 0)
+               goto err2;
+       return err;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
+err1:
+       return err;
+}
+
+static void __exit nf_nat_l3proto_ipv6_exit(void)
+{
+       nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv6);
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("nf-nat-" __stringify(AF_INET6));
+
+module_init(nf_nat_l3proto_ipv6_init);
+module_exit(nf_nat_l3proto_ipv6_exit);
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
new file mode 100644 (file)
index 0000000..5d6da78
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 Patrick Mchardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 ICMP NAT code. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/icmpv6.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static bool
+icmpv6_in_range(const struct nf_conntrack_tuple *tuple,
+               enum nf_nat_manip_type maniptype,
+               const union nf_conntrack_man_proto *min,
+               const union nf_conntrack_man_proto *max)
+{
+       return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
+              ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
+}
+
+static void
+icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                   struct nf_conntrack_tuple *tuple,
+                   const struct nf_nat_range *range,
+                   enum nf_nat_manip_type maniptype,
+                   const struct nf_conn *ct)
+{
+       static u16 id;
+       unsigned int range_size;
+       unsigned int i;
+
+       range_size = ntohs(range->max_proto.icmp.id) -
+                    ntohs(range->min_proto.icmp.id) + 1;
+
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
+               range_size = 0xffff;
+
+       for (i = 0; ; ++id) {
+               tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
+                                            (id % range_size));
+               if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+                       return;
+       }
+}
+
+static bool
+icmpv6_manip_pkt(struct sk_buff *skb,
+                const struct nf_nat_l3proto *l3proto,
+                unsigned int iphdroff, unsigned int hdroff,
+                const struct nf_conntrack_tuple *tuple,
+                enum nf_nat_manip_type maniptype)
+{
+       struct icmp6hdr *hdr;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+
+       hdr = (struct icmp6hdr *)(skb->data + hdroff);
+       l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
+                            tuple, maniptype);
+       if (hdr->icmp6_code == ICMPV6_ECHO_REQUEST ||
+           hdr->icmp6_code == ICMPV6_ECHO_REPLY) {
+               inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
+                                        hdr->icmp6_identifier,
+                                        tuple->src.u.icmp.id, 0);
+               hdr->icmp6_identifier = tuple->src.u.icmp.id;
+       }
+       return true;
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
+       .l4proto                = IPPROTO_ICMPV6,
+       .manip_pkt              = icmpv6_manip_pkt,
+       .in_range               = icmpv6_in_range,
+       .unique_tuple           = icmpv6_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
index 4ff9af628e72762843cf35c083c6ee1427b57b03..da8a4e301b1b04ec5d8d0d7aa042a328c986e1d9 100644 (file)
@@ -65,36 +65,8 @@ struct ip6frag_skb_cb
 #define FRAG6_CB(skb)  ((struct ip6frag_skb_cb*)((skb)->cb))
 
 
-/*
- *     Equivalent of ipv4 struct ipq
- */
-
-struct frag_queue
-{
-       struct inet_frag_queue  q;
-
-       __be32                  id;             /* fragment id          */
-       u32                     user;
-       struct in6_addr         saddr;
-       struct in6_addr         daddr;
-
-       int                     iif;
-       unsigned int            csum;
-       __u16                   nhoffset;
-};
-
 static struct inet_frags ip6_frags;
 
-int ip6_frag_nqueues(struct net *net)
-{
-       return net->ipv6.frags.nqueues;
-}
-
-int ip6_frag_mem(struct net *net)
-{
-       return atomic_read(&net->ipv6.frags.mem);
-}
-
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);
 
@@ -159,46 +131,18 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
-/* Destruction primitives. */
-
-static __inline__ void fq_put(struct frag_queue *fq)
-{
-       inet_frag_put(&fq->q, &ip6_frags);
-}
-
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct frag_queue *fq)
-{
-       inet_frag_kill(&fq->q, &ip6_frags);
-}
-
-static void ip6_evictor(struct net *net, struct inet6_dev *idev)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+                          struct inet_frags *frags)
 {
-       int evicted;
-
-       evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
-       if (evicted)
-               IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
-}
-
-static void ip6_frag_expire(unsigned long data)
-{
-       struct frag_queue *fq;
        struct net_device *dev = NULL;
-       struct net *net;
-
-       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 
        spin_lock(&fq->q.lock);
 
        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;
 
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, frags);
 
-       net = container_of(fq->q.net, struct net, ipv6.frags);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, fq->iif);
        if (!dev)
@@ -222,7 +166,19 @@ out_rcu_unlock:
        rcu_read_unlock();
 out:
        spin_unlock(&fq->q.lock);
-       fq_put(fq);
+       inet_frag_put(&fq->q, frags);
+}
+EXPORT_SYMBOL(ip6_expire_frag_queue);
+
+static void ip6_frag_expire(unsigned long data)
+{
+       struct frag_queue *fq;
+       struct net *net;
+
+       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+       net = container_of(fq->q.net, struct net, ipv6.frags);
+
+       ip6_expire_frag_queue(net, fq, &ip6_frags);
 }
 
 static __inline__ struct frag_queue *
@@ -391,7 +347,7 @@ found:
        return -1;
 
 discard_fq:
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &ip6_frags);
 err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
@@ -417,7 +373,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        unsigned int nhoff;
        int sum_truesize;
 
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &ip6_frags);
 
        /* Make the one we just received the head. */
        if (prev) {
@@ -550,6 +506,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        struct frag_queue *fq;
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);
+       int evicted;
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
@@ -574,8 +531,10 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                return 1;
        }
 
-       if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
-               ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
+       evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
+       if (evicted)
+               IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+                                IPSTATS_MIB_REASMFAILS, evicted);
 
        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
        if (fq != NULL) {
@@ -586,7 +545,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
                spin_unlock(&fq->q.lock);
-               fq_put(fq);
+               inet_frag_put(&fq->q, &ip6_frags);
                return ret;
        }
 
index 854e4018d205c826032633ce2641cbc1af2c057a..d1ddbc6ddac50907ea22983be8fdf3389967b39a 100644 (file)
@@ -222,7 +222,7 @@ static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 255,
 };
 
-static struct rt6_info ip6_null_entry_template = {
+static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
@@ -242,7 +242,7 @@ static struct rt6_info ip6_null_entry_template = {
 static int ip6_pkt_prohibit(struct sk_buff *skb);
 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
 
-static struct rt6_info ip6_prohibit_entry_template = {
+static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
@@ -257,7 +257,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
        .rt6i_ref       = ATOMIC_INIT(1),
 };
 
-static struct rt6_info ip6_blk_hole_entry_template = {
+static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
@@ -370,15 +370,11 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 
 static bool rt6_check_expired(const struct rt6_info *rt)
 {
-       struct rt6_info *ort = NULL;
-
        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (rt->dst.from) {
-               ort = (struct rt6_info *) rt->dst.from;
-               return (ort->rt6i_flags & RTF_EXPIRES) &&
-                       time_after(jiffies, ort->dst.expires);
+               return rt6_check_expired((struct rt6_info *) rt->dst.from);
        }
        return false;
 }
@@ -452,10 +448,9 @@ static void rt6_probe(struct rt6_info *rt)
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
-       rcu_read_lock();
        neigh = rt ? rt->n : NULL;
        if (!neigh || (neigh->nud_state & NUD_VALID))
-               goto out;
+               return;
        read_lock_bh(&neigh->lock);
        if (!(neigh->nud_state & NUD_VALID) &&
            time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -471,8 +466,6 @@ static void rt6_probe(struct rt6_info *rt)
        } else {
                read_unlock_bh(&neigh->lock);
        }
-out:
-       rcu_read_unlock();
 }
 #else
 static inline void rt6_probe(struct rt6_info *rt)
@@ -499,7 +492,6 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
        struct neighbour *neigh;
        int m;
 
-       rcu_read_lock();
        neigh = rt->n;
        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
@@ -517,7 +509,6 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
                read_unlock_bh(&neigh->lock);
        } else
                m = 0;
-       rcu_read_unlock();
        return m;
 }
 
@@ -966,7 +957,7 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
 {
        int flags = 0;
 
-       fl6->flowi6_iif = net->loopback_dev->ifindex;
+       fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
                flags |= RT6_LOOKUP_F_IFACE;
@@ -1469,8 +1460,21 @@ int ip6_route_add(struct fib6_config *cfg)
                }
                rt->dst.output = ip6_pkt_discard_out;
                rt->dst.input = ip6_pkt_discard;
-               rt->dst.error = -ENETUNREACH;
                rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
+               switch (cfg->fc_type) {
+               case RTN_BLACKHOLE:
+                       rt->dst.error = -EINVAL;
+                       break;
+               case RTN_PROHIBIT:
+                       rt->dst.error = -EACCES;
+                       break;
+               case RTN_THROW:
+                       rt->dst.error = -EAGAIN;
+                       break;
+               default:
+                       rt->dst.error = -ENETUNREACH;
+                       break;
+               }
                goto install_route;
        }
 
@@ -1835,7 +1839,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
        if (!table)
                return NULL;
 
-       write_lock_bh(&table->tb6_lock);
+       read_lock_bh(&table->tb6_lock);
        fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
        if (!fn)
                goto out;
@@ -1851,7 +1855,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                break;
        }
 out:
-       write_unlock_bh(&table->tb6_lock);
+       read_unlock_bh(&table->tb6_lock);
        return rt;
 }
 
@@ -1867,7 +1871,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
-               .fc_nlinfo.pid = 0,
+               .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = net,
        };
@@ -1894,7 +1898,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        if (!table)
                return NULL;
 
-       write_lock_bh(&table->tb6_lock);
+       read_lock_bh(&table->tb6_lock);
        for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
                if (dev == rt->dst.dev &&
                    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
@@ -1903,7 +1907,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        }
        if (rt)
                dst_hold(&rt->dst);
-       write_unlock_bh(&table->tb6_lock);
+       read_unlock_bh(&table->tb6_lock);
        return rt;
 }
 
@@ -1917,7 +1921,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
                                  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
-               .fc_nlinfo.pid = 0,
+               .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = dev_net(dev),
        };
@@ -2266,14 +2270,18 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        cfg->fc_src_len = rtm->rtm_src_len;
        cfg->fc_flags = RTF_UP;
        cfg->fc_protocol = rtm->rtm_protocol;
+       cfg->fc_type = rtm->rtm_type;
 
-       if (rtm->rtm_type == RTN_UNREACHABLE)
+       if (rtm->rtm_type == RTN_UNREACHABLE ||
+           rtm->rtm_type == RTN_BLACKHOLE ||
+           rtm->rtm_type == RTN_PROHIBIT ||
+           rtm->rtm_type == RTN_THROW)
                cfg->fc_flags |= RTF_REJECT;
 
        if (rtm->rtm_type == RTN_LOCAL)
                cfg->fc_flags |= RTF_LOCAL;
 
-       cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
+       cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
        cfg->fc_nlinfo.nlh = nlh;
        cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
 
@@ -2364,7 +2372,7 @@ static inline size_t rt6_nlmsg_size(void)
 static int rt6_fill_node(struct net *net,
                         struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
-                        int iif, int type, u32 pid, u32 seq,
+                        int iif, int type, u32 portid, u32 seq,
                         int prefix, int nowait, unsigned int flags)
 {
        struct rtmsg *rtm;
@@ -2380,7 +2388,7 @@ static int rt6_fill_node(struct net *net,
                }
        }
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -2396,8 +2404,22 @@ static int rt6_fill_node(struct net *net,
        rtm->rtm_table = table;
        if (nla_put_u32(skb, RTA_TABLE, table))
                goto nla_put_failure;
-       if (rt->rt6i_flags & RTF_REJECT)
-               rtm->rtm_type = RTN_UNREACHABLE;
+       if (rt->rt6i_flags & RTF_REJECT) {
+               switch (rt->dst.error) {
+               case -EINVAL:
+                       rtm->rtm_type = RTN_BLACKHOLE;
+                       break;
+               case -EACCES:
+                       rtm->rtm_type = RTN_PROHIBIT;
+                       break;
+               case -EAGAIN:
+                       rtm->rtm_type = RTN_THROW;
+                       break;
+               default:
+                       rtm->rtm_type = RTN_UNREACHABLE;
+                       break;
+               }
+       }
        else if (rt->rt6i_flags & RTF_LOCAL)
                rtm->rtm_type = RTN_LOCAL;
        else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
@@ -2470,15 +2492,11 @@ static int rt6_fill_node(struct net *net,
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
-       rcu_read_lock();
        n = rt->n;
        if (n) {
-               if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
-                       rcu_read_unlock();
+               if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0)
                        goto nla_put_failure;
-               }
        }
-       rcu_read_unlock();
 
        if (rt->dst.dev &&
            nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
@@ -2511,7 +2529,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
 
        return rt6_fill_node(arg->net,
                     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
-                    NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
+                    NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
                     prefix, 0, NLM_F_MULTI);
 }
 
@@ -2591,14 +2609,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        skb_dst_set(skb, &rt->dst);
 
        err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
-                           RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
+                           RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
                            nlh->nlmsg_seq, 0, 0, 0);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }
 
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 errout:
        return err;
 }
@@ -2618,14 +2636,14 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
                goto errout;
 
        err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
-                               event, info->pid, seq, 0, 0, 0);
+                               event, info->portid, seq, 0, 0, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
+       rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
                    info->nlh, gfp_any());
        return;
 errout:
@@ -2680,14 +2698,12 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 #else
        seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
-       rcu_read_lock();
        n = rt->n;
        if (n) {
                seq_printf(m, "%pi6", n->primary_key);
        } else {
                seq_puts(m, "00000000000000000000000000000000");
        }
-       rcu_read_unlock();
        seq_printf(m, " %08x %08x %08x %08x %8s\n",
                   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
                   rt->dst.__use, rt->rt6i_flags,
index 3bd1bfc01f8523c9ad0e9698f24974fbf1cc5171..3ed54ffd8d50dfa1ccf5b4b539c5461fae29c4fb 100644 (file)
@@ -545,7 +545,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 
        err = -ENOENT;
 
-       rcu_read_lock();
        t = ipip6_tunnel_lookup(dev_net(skb->dev),
                                skb->dev,
                                iph->daddr,
@@ -579,7 +578,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
                t->err_count = 1;
        t->err_time = jiffies;
 out:
-       rcu_read_unlock();
        return err;
 }
 
@@ -599,7 +597,6 @@ static int ipip6_rcv(struct sk_buff *skb)
 
        iph = ip_hdr(skb);
 
-       rcu_read_lock();
        tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
                                     iph->saddr, iph->daddr);
        if (tunnel != NULL) {
@@ -615,7 +612,6 @@ static int ipip6_rcv(struct sk_buff *skb)
                if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
                    !isatap_chksrc(skb, iph, tunnel)) {
                        tunnel->dev->stats.rx_errors++;
-                       rcu_read_unlock();
                        kfree_skb(skb);
                        return 0;
                }
@@ -630,12 +626,10 @@ static int ipip6_rcv(struct sk_buff *skb)
 
                netif_rx(skb);
 
-               rcu_read_unlock();
                return 0;
        }
 
        /* no tunnel matched,  let upstream know, ipsec may handle it */
-       rcu_read_unlock();
        return 1;
 out:
        kfree_skb(skb);
index bb46061c813a45c1ef859f5c60c2c5e45773ed29..182ab9a85d6cb5c0ad88e89bab6f5c0d22c380e8 100644 (file)
@@ -190,6 +190,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        ireq = inet_rsk(req);
        ireq6 = inet6_rsk(req);
        treq = tcp_rsk(req);
+       treq->listener = NULL;
 
        if (security_inet_conn_request(sk, skb, req))
                goto out_free;
index 342ec62cdbde9f7b3a3e47343d158e7cf9ba2564..49c890386ce9ba6b76e401191d7d64484da7a238 100644 (file)
@@ -476,7 +476,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
                goto done;
 
-       skb = tcp_make_synack(sk, dst, req, rvp);
+       skb = tcp_make_synack(sk, dst, req, rvp, NULL);
 
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -763,6 +763,8 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
 {
        const struct ipv6hdr *iph = skb_gro_network_header(skb);
+       __wsum wsum;
+       __sum16 sum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
@@ -771,11 +773,23 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }
-
-               /* fall through */
-       case CHECKSUM_NONE:
+flush:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
+
+       case CHECKSUM_NONE:
+               wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+                                                   skb_gro_len(skb),
+                                                   IPPROTO_TCP, 0));
+               sum = csum_fold(skb_checksum(skb,
+                                            skb_gro_offset(skb),
+                                            skb_gro_len(skb),
+                                            wsum));
+               if (sum)
+                       goto flush;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
        }
 
        return tcp_gro_receive(head, skb);
@@ -988,7 +1002,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
                                   &ipv6_hdr(skb)->saddr,
                                   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
        if (req)
-               return tcp_check_req(sk, skb, req, prev);
+               return tcp_check_req(sk, skb, req, prev, false);
 
        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                        &ipv6_hdr(skb)->saddr, th->source,
@@ -1169,7 +1183,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        }
 have_isn:
        tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_release;
@@ -1180,6 +1193,8 @@ have_isn:
            want_cookie)
                goto drop_and_free;
 
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       tcp_rsk(req)->listener = NULL;
        inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;
 
@@ -1347,9 +1362,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
        tcp_initialize_rcv_mss(newsk);
-       if (tcp_rsk(req)->snt_synack)
-               tcp_valid_rtt_meas(newsk,
-                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
+       tcp_synack_rtt_meas(newsk, req);
        newtp->total_retrans = req->retrans;
 
        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
@@ -1901,7 +1914,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   tp->write_seq-tp->snd_una,
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
-                  jiffies_to_clock_t(timer_expires - jiffies),
+                  jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
@@ -1921,10 +1934,7 @@ static void get_timewait6_sock(struct seq_file *seq,
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-       int ttd = tw->tw_ttd - jiffies;
-
-       if (ttd < 0)
-               ttd = 0;
+       long delta = tw->tw_ttd - jiffies;
 
        dest = &tw6->tw_v6_daddr;
        src  = &tw6->tw_v6_rcv_saddr;
@@ -1940,7 +1950,7 @@ static void get_timewait6_sock(struct seq_file *seq,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
-                  3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+                  3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
 }
 
index 6c7c4b92e4f8ec0e5a2aad62b33a6ada0d813c0e..c32971269280116543c0bf560e1bbc3248df034d 100644 (file)
@@ -100,7 +100,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &irda_nl_family, 0,  IRDA_NL_CMD_GET_MODE);
        if (hdr == NULL) {
                ret = -EMSGSIZE;
index 0481d4b514764ddf629ccf91615646c1c3fd6f34..08897a3c7ec764550d518777811fa8e3179a9492 100644 (file)
@@ -54,7 +54,7 @@ struct pfkey_sock {
 
        struct {
                uint8_t         msg_version;
-               uint32_t        msg_pid;
+               uint32_t        msg_portid;
                int             (*dump)(struct pfkey_sock *sk);
                void            (*done)(struct pfkey_sock *sk);
                union {
@@ -1447,7 +1447,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
        hdr->sadb_msg_errno = 0;
        hdr->sadb_msg_reserved = 0;
        hdr->sadb_msg_seq = c->seq;
-       hdr->sadb_msg_pid = c->pid;
+       hdr->sadb_msg_pid = c->portid;
 
        pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
 
@@ -1486,7 +1486,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
        else
                c.event = XFRM_MSG_UPDSA;
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        km_state_notify(x, &c);
 out:
        xfrm_state_put(x);
@@ -1523,7 +1523,7 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_
                goto out;
 
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.event = XFRM_MSG_DELSA;
        km_state_notify(x, &c);
 out:
@@ -1701,7 +1701,7 @@ static int key_notify_sa_flush(const struct km_event *c)
        hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
        hdr->sadb_msg_type = SADB_FLUSH;
        hdr->sadb_msg_seq = c->seq;
-       hdr->sadb_msg_pid = c->pid;
+       hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -1736,7 +1736,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
 
        c.data.proto = proto;
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.event = XFRM_MSG_FLUSHSA;
        c.net = net;
        km_state_notify(NULL, &c);
@@ -1764,7 +1764,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_reserved = 0;
        out_hdr->sadb_msg_seq = count + 1;
-       out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
+       out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
                pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
@@ -1798,7 +1798,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
                return -EINVAL;
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
-       pfk->dump.msg_pid = hdr->sadb_msg_pid;
+       pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sa;
        pfk->dump.done = pfkey_dump_sa_done;
        xfrm_state_walk_init(&pfk->dump.u.state, proto);
@@ -1923,6 +1923,9 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy);
        struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
 
+       if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
+               return -EINVAL;
+
        while (len >= sizeof(struct sadb_x_ipsecrequest)) {
                if ((err = parse_ipsecrequest(xp, rq)) < 0)
                        return err;
@@ -2157,7 +2160,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
                out_hdr->sadb_msg_type = event2poltype(c->event);
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = c->seq;
-       out_hdr->sadb_msg_pid = c->pid;
+       out_hdr->sadb_msg_pid = c->portid;
        pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
        return 0;
 
@@ -2272,7 +2275,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
                c.event = XFRM_MSG_NEWPOLICY;
 
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
 
        km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
        xfrm_pol_put(xp);
@@ -2351,7 +2354,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
                goto out;
 
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.data.byid = 0;
        c.event = XFRM_MSG_DELPOLICY;
        km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
@@ -2597,7 +2600,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
                if (err)
                        goto out;
                c.seq = hdr->sadb_msg_seq;
-               c.pid = hdr->sadb_msg_pid;
+               c.portid = hdr->sadb_msg_pid;
                c.data.byid = 1;
                c.event = XFRM_MSG_DELPOLICY;
                km_policy_notify(xp, dir, &c);
@@ -2634,7 +2637,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
        out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = count + 1;
-       out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
+       out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
                pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
@@ -2663,7 +2666,7 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
                return -EBUSY;
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
-       pfk->dump.msg_pid = hdr->sadb_msg_pid;
+       pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sp;
        pfk->dump.done = pfkey_dump_sp_done;
        xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
@@ -2682,7 +2685,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
        hdr->sadb_msg_type = SADB_X_SPDFLUSH;
        hdr->sadb_msg_seq = c->seq;
-       hdr->sadb_msg_pid = c->pid;
+       hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -2711,7 +2714,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 
        c.data.type = XFRM_POLICY_TYPE_MAIN;
        c.event = XFRM_MSG_FLUSHPOLICY;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.seq = hdr->sadb_msg_seq;
        c.net = net;
        km_policy_notify(NULL, 0, &c);
@@ -3024,7 +3027,7 @@ static u32 get_acqseq(void)
        return res;
 }
 
-static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp, int dir)
+static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -3105,7 +3108,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
        pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
        pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
        pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
-       pol->sadb_x_policy_dir = dir+1;
+       pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
        pol->sadb_x_policy_id = xp->index;
 
        /* Set sadb_comb's. */
index 4b1e71751e1019de005ed9f4f8a9708f2c11f158..147a8fd47a17610dd392a49acee6aaf292c1b5d8 100644 (file)
@@ -4,6 +4,7 @@
 
 menuconfig L2TP
        tristate "Layer Two Tunneling Protocol (L2TP)"
+       depends on (IPV6 || IPV6=n)
        depends on INET
        ---help---
          Layer Two Tunneling Protocol
index 3bfb34aaee293cb697f36ae88a060f3329571214..37b8b8ba31f7395001cd2f36e234d82878bc22c7 100644 (file)
@@ -67,6 +67,7 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
        return net_generic(net, l2tp_eth_net_id);
 }
 
+static struct lock_class_key l2tp_eth_tx_busylock;
 static int l2tp_eth_dev_init(struct net_device *dev)
 {
        struct l2tp_eth *priv = netdev_priv(dev);
@@ -74,7 +75,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
        priv->dev = dev;
        eth_hw_addr_random(dev);
        memset(&dev->broadcast[0], 0xff, 6);
-
+       dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
        return 0;
 }
 
index 6f936358d664cd3a8946317ca879180a937c8b22..6c4cc12c7414f90341513a29da3097f4c9f9ecb4 100644 (file)
@@ -78,7 +78,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &l2tp_nl_family, 0, L2TP_CMD_NOOP);
        if (!hdr) {
                ret = -EMSGSIZE;
@@ -87,7 +87,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
        genlmsg_end(msg, hdr);
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
 err_out:
        nlmsg_free(msg);
@@ -235,7 +235,7 @@ out:
        return ret;
 }
 
-static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                               struct l2tp_tunnel *tunnel)
 {
        void *hdr;
@@ -248,7 +248,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
        struct l2tp_stats stats;
        unsigned int start;
 
-       hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
+       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
                          L2TP_CMD_TUNNEL_GET);
        if (!hdr)
                return -EMSGSIZE;
@@ -359,12 +359,12 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
+       ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
                                  NLM_F_ACK, tunnel);
        if (ret < 0)
                goto err_out;
 
-       return genlmsg_unicast(net, msg, info->snd_pid);
+       return genlmsg_unicast(net, msg, info->snd_portid);
 
 err_out:
        nlmsg_free(msg);
@@ -384,7 +384,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
                if (tunnel == NULL)
                        goto out;
 
-               if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
+               if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                        tunnel) <= 0)
                        goto out;
@@ -604,7 +604,7 @@ out:
        return ret;
 }
 
-static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                                struct l2tp_session *session)
 {
        void *hdr;
@@ -616,7 +616,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
 
        sk = tunnel->sock;
 
-       hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -705,12 +705,12 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
+       ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
                                   0, session);
        if (ret < 0)
                goto err_out;
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
 err_out:
        nlmsg_free(msg);
@@ -742,7 +742,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
                        continue;
                }
 
-               if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
+               if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         session) <= 0)
                        break;
index b2f2bac2c2a2397b5fd6f6b79659996270b96377..204a8351efffc86f566e51b27f173c8bdee6c4cb 100644 (file)
 #include <net/llc_s_st.h>
 #include <net/llc_pdu.h>
 
-/**
- * struct llc_station - LLC station component
- *
- * SAP and connection resource manager, one per adapter.
- *
- * @state: state of station
- * @xid_r_count: XID response PDU counter
- * @mac_sa: MAC source address
- * @sap_list: list of related SAPs
- * @ev_q: events entering state mach.
- * @mac_pdu_q: PDUs ready to send to MAC
- */
-struct llc_station {
-       u8                          state;
-       u8                          xid_r_count;
-       struct timer_list           ack_timer;
-       u8                          retry_count;
-       u8                          maximum_retry;
-       struct {
-               struct sk_buff_head list;
-               spinlock_t          lock;
-       } ev_q;
-       struct sk_buff_head         mac_pdu_q;
-};
-
-#define LLC_STATION_ACK_TIME (3 * HZ)
-
-int sysctl_llc_station_ack_timeout = LLC_STATION_ACK_TIME;
-
-/* Types of events (possible values in 'ev->type') */
-#define LLC_STATION_EV_TYPE_SIMPLE     1
-#define LLC_STATION_EV_TYPE_CONDITION  2
-#define LLC_STATION_EV_TYPE_PRIM       3
-#define LLC_STATION_EV_TYPE_PDU                4       /* command/response PDU */
-#define LLC_STATION_EV_TYPE_ACK_TMR    5
-#define LLC_STATION_EV_TYPE_RPT_STATUS 6
-
-/* Events */
-#define LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK              1
-#define LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK           2
-#define LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY      3
-#define LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY      4
-#define LLC_STATION_EV_RX_NULL_DSAP_XID_C                      5
-#define LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ       6
-#define LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ       7
-#define LLC_STATION_EV_RX_NULL_DSAP_TEST_C                     8
-#define LLC_STATION_EV_DISABLE_REQ                             9
-
-struct llc_station_state_ev {
-       u8               type;
-       u8               prim;
-       u8               prim_type;
-       u8               reason;
-       struct list_head node; /* node in station->ev_q.list */
-};
-
-static __inline__ struct llc_station_state_ev *
-                                       llc_station_ev(struct sk_buff *skb)
-{
-       return (struct llc_station_state_ev *)skb->cb;
-}
-
-typedef int (*llc_station_ev_t)(struct sk_buff *skb);
-
-#define LLC_STATION_STATE_DOWN         1       /* initial state */
-#define LLC_STATION_STATE_DUP_ADDR_CHK 2
-#define LLC_STATION_STATE_UP           3
-
-#define LLC_NBR_STATION_STATES         3       /* size of state table */
-
-typedef int (*llc_station_action_t)(struct sk_buff *skb);
-
-/* Station component state table structure */
-struct llc_station_state_trans {
-       llc_station_ev_t ev;
-       u8 next_state;
-       llc_station_action_t *ev_actions;
-};
-
-struct llc_station_state {
-       u8 curr_state;
-       struct llc_station_state_trans **transitions;
-};
-
-static struct llc_station llc_main_station;
-
-static int llc_stat_ev_enable_with_dup_addr_check(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
-              ev->prim_type ==
-                             LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK ? 0 : 1;
-}
-
-static int llc_stat_ev_enable_without_dup_addr_check(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
-              ev->prim_type ==
-                       LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK ? 0 : 1;
-}
-
-static int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
-               llc_main_station.retry_count <
-               llc_main_station.maximum_retry ? 0 : 1;
-}
-
-static int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
-               llc_main_station.retry_count ==
-               llc_main_station.maximum_retry ? 0 : 1;
-}
-
 static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
 {
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
 
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
+       return LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
               LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
               LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
               !pdu->dsap ? 0 : 1;                      /* NULL DSAP value */
 }
 
-static int llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-       struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_RSP(pdu) &&                   /* response PDU */
-              LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
-              LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID &&
-              !pdu->dsap &&                            /* NULL DSAP value */
-              !llc_main_station.xid_r_count ? 0 : 1;
-}
-
-static int llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-       struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_RSP(pdu) &&                   /* response PDU */
-              LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
-              LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID &&
-              !pdu->dsap &&                            /* NULL DSAP value */
-              llc_main_station.xid_r_count == 1 ? 0 : 1;
-}
-
 static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
 {
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
 
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
+       return LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
               LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
               LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
               !pdu->dsap ? 0 : 1;                      /* NULL DSAP */
 }
 
-static int llc_stat_ev_disable_req(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_PRIM &&
-              ev->prim == LLC_DISABLE_PRIM &&
-              ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
-}
-
-/**
- *     llc_station_send_pdu - queues PDU to send
- *     @skb: Address of the PDU
- *
- *     Queues a PDU to send to the MAC layer.
- */
-static void llc_station_send_pdu(struct sk_buff *skb)
-{
-       skb_queue_tail(&llc_main_station.mac_pdu_q, skb);
-       while ((skb = skb_dequeue(&llc_main_station.mac_pdu_q)) != NULL)
-               if (dev_queue_xmit(skb))
-                       break;
-}
-
-static int llc_station_ac_start_ack_timer(struct sk_buff *skb)
-{
-       mod_timer(&llc_main_station.ack_timer,
-                 jiffies + sysctl_llc_station_ack_timeout);
-       return 0;
-}
-
-static int llc_station_ac_set_retry_cnt_0(struct sk_buff *skb)
-{
-       llc_main_station.retry_count = 0;
-       return 0;
-}
-
-static int llc_station_ac_inc_retry_cnt_by_1(struct sk_buff *skb)
-{
-       llc_main_station.retry_count++;
-       return 0;
-}
-
-static int llc_station_ac_set_xid_r_cnt_0(struct sk_buff *skb)
-{
-       llc_main_station.xid_r_count = 0;
-       return 0;
-}
-
-static int llc_station_ac_inc_xid_r_cnt_by_1(struct sk_buff *skb)
-{
-       llc_main_station.xid_r_count++;
-       return 0;
-}
-
-static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
-{
-       int rc = 1;
-       struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
-                                              sizeof(struct llc_xid_info));
-
-       if (!nskb)
-               goto out;
-       llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD);
-       llc_pdu_init_as_xid_cmd(nskb, LLC_XID_NULL_CLASS_2, 127);
-       rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, skb->dev->dev_addr);
-       if (unlikely(rc))
-               goto free;
-       llc_station_send_pdu(nskb);
-out:
-       return rc;
-free:
-       kfree_skb(nskb);
-       goto out;
-}
-
 static int llc_station_ac_send_xid_r(struct sk_buff *skb)
 {
        u8 mac_da[ETH_ALEN], dsap;
@@ -289,7 +62,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
        rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
        if (unlikely(rc))
                goto free;
-       llc_station_send_pdu(nskb);
+       dev_queue_xmit(nskb);
 out:
        return rc;
 free:
@@ -318,7 +91,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
        rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
        if (unlikely(rc))
                goto free;
-       llc_station_send_pdu(nskb);
+       dev_queue_xmit(nskb);
 out:
        return rc;
 free:
@@ -326,352 +99,6 @@ free:
        goto out;
 }
 
-static int llc_station_ac_report_status(struct sk_buff *skb)
-{
-       return 0;
-}
-
-/* COMMON STATION STATE transitions */
-
-/* dummy last-transition indicator; common to all state transition groups
- * last entry for this state
- * all members are zeros, .bss zeroes it
- */
-static struct llc_station_state_trans llc_stat_state_trans_end;
-
-/* DOWN STATE transitions */
-
-/* state transition for LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK event */
-static llc_station_action_t llc_stat_down_state_actions_1[] = {
-       [0] = llc_station_ac_start_ack_timer,
-       [1] = llc_station_ac_set_retry_cnt_0,
-       [2] = llc_station_ac_set_xid_r_cnt_0,
-       [3] = llc_station_ac_send_null_dsap_xid_c,
-       [4] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_down_state_trans_1 = {
-       .ev         = llc_stat_ev_enable_with_dup_addr_check,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_down_state_actions_1,
-};
-
-/* state transition for LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK event */
-static llc_station_action_t llc_stat_down_state_actions_2[] = {
-       [0] = llc_station_ac_report_status,     /* STATION UP */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_down_state_trans_2 = {
-       .ev         = llc_stat_ev_enable_without_dup_addr_check,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_down_state_actions_2,
-};
-
-/* array of pointers; one to each transition */
-static struct llc_station_state_trans *llc_stat_dwn_state_trans[] = {
-       [0] = &llc_stat_down_state_trans_1,
-       [1] = &llc_stat_down_state_trans_2,
-       [2] = &llc_stat_state_trans_end,
-};
-
-/* UP STATE transitions */
-/* state transition for LLC_STATION_EV_DISABLE_REQ event */
-static llc_station_action_t llc_stat_up_state_actions_1[] = {
-       [0] = llc_station_ac_report_status,     /* STATION DOWN */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_up_state_trans_1 = {
-       .ev         = llc_stat_ev_disable_req,
-       .next_state = LLC_STATION_STATE_DOWN,
-       .ev_actions = llc_stat_up_state_actions_1,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */
-static llc_station_action_t llc_stat_up_state_actions_2[] = {
-       [0] = llc_station_ac_send_xid_r,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_up_state_trans_2 = {
-       .ev         = llc_stat_ev_rx_null_dsap_xid_c,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_up_state_actions_2,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_TEST_C event */
-static llc_station_action_t llc_stat_up_state_actions_3[] = {
-       [0] = llc_station_ac_send_test_r,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_up_state_trans_3 = {
-       .ev         = llc_stat_ev_rx_null_dsap_test_c,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_up_state_actions_3,
-};
-
-/* array of pointers; one to each transition */
-static struct llc_station_state_trans *llc_stat_up_state_trans [] = {
-       [0] = &llc_stat_up_state_trans_1,
-       [1] = &llc_stat_up_state_trans_2,
-       [2] = &llc_stat_up_state_trans_3,
-       [3] = &llc_stat_state_trans_end,
-};
-
-/* DUP ADDR CHK STATE transitions */
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_1[] = {
-       [0] = llc_station_ac_inc_xid_r_cnt_by_1,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_1 = {
-       .ev         = llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_dupaddr_state_actions_1,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_2[] = {
-       [0] = llc_station_ac_report_status,     /* DUPLICATE ADDRESS FOUND */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_2 = {
-       .ev         = llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq,
-       .next_state = LLC_STATION_STATE_DOWN,
-       .ev_actions = llc_stat_dupaddr_state_actions_2,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */
-static llc_station_action_t llc_stat_dupaddr_state_actions_3[] = {
-       [0] = llc_station_ac_send_xid_r,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_3 = {
-       .ev         = llc_stat_ev_rx_null_dsap_xid_c,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_dupaddr_state_actions_3,
-};
-
-/* state transition for LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_4[] = {
-       [0] = llc_station_ac_start_ack_timer,
-       [1] = llc_station_ac_inc_retry_cnt_by_1,
-       [2] = llc_station_ac_set_xid_r_cnt_0,
-       [3] = llc_station_ac_send_null_dsap_xid_c,
-       [4] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_4 = {
-       .ev         = llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_dupaddr_state_actions_4,
-};
-
-/* state transition for LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_5[] = {
-       [0] = llc_station_ac_report_status,     /* STATION UP */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_5 = {
-       .ev         = llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_dupaddr_state_actions_5,
-};
-
-/* state transition for LLC_STATION_EV_DISABLE_REQ event */
-static llc_station_action_t llc_stat_dupaddr_state_actions_6[] = {
-       [0] = llc_station_ac_report_status,     /* STATION DOWN */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_6 = {
-       .ev         = llc_stat_ev_disable_req,
-       .next_state = LLC_STATION_STATE_DOWN,
-       .ev_actions = llc_stat_dupaddr_state_actions_6,
-};
-
-/* array of pointers; one to each transition */
-static struct llc_station_state_trans *llc_stat_dupaddr_state_trans[] = {
-       [0] = &llc_stat_dupaddr_state_trans_6,  /* Request */
-       [1] = &llc_stat_dupaddr_state_trans_4,  /* Timer */
-       [2] = &llc_stat_dupaddr_state_trans_5,
-       [3] = &llc_stat_dupaddr_state_trans_1,  /* Receive frame */
-       [4] = &llc_stat_dupaddr_state_trans_2,
-       [5] = &llc_stat_dupaddr_state_trans_3,
-       [6] = &llc_stat_state_trans_end,
-};
-
-static struct llc_station_state
-                       llc_station_state_table[LLC_NBR_STATION_STATES] = {
-       [LLC_STATION_STATE_DOWN - 1] = {
-               .curr_state  = LLC_STATION_STATE_DOWN,
-               .transitions = llc_stat_dwn_state_trans,
-       },
-       [LLC_STATION_STATE_DUP_ADDR_CHK - 1] = {
-               .curr_state  = LLC_STATION_STATE_DUP_ADDR_CHK,
-               .transitions = llc_stat_dupaddr_state_trans,
-       },
-       [LLC_STATION_STATE_UP - 1] = {
-               .curr_state  = LLC_STATION_STATE_UP,
-               .transitions = llc_stat_up_state_trans,
-       },
-};
-
-/**
- *     llc_exec_station_trans_actions - executes actions for transition
- *     @trans: Address of the transition
- *     @skb: Address of the event that caused the transition
- *
- *     Executes actions of a transition of the station state machine. Returns
- *     0 if all actions complete successfully, nonzero otherwise.
- */
-static u16 llc_exec_station_trans_actions(struct llc_station_state_trans *trans,
-                                         struct sk_buff *skb)
-{
-       u16 rc = 0;
-       llc_station_action_t *next_action = trans->ev_actions;
-
-       for (; next_action && *next_action; next_action++)
-               if ((*next_action)(skb))
-                       rc = 1;
-       return rc;
-}
-
-/**
- *     llc_find_station_trans - finds transition for this event
- *     @skb: Address of the event
- *
- *     Search thru events of the current state of the station until list
- *     exhausted or it's obvious that the event is not valid for the current
- *     state. Returns the address of the transition if cound, %NULL otherwise.
- */
-static struct llc_station_state_trans *
-                               llc_find_station_trans(struct sk_buff *skb)
-{
-       int i = 0;
-       struct llc_station_state_trans *rc = NULL;
-       struct llc_station_state_trans **next_trans;
-       struct llc_station_state *curr_state =
-                               &llc_station_state_table[llc_main_station.state - 1];
-
-       for (next_trans = curr_state->transitions; next_trans[i]->ev; i++)
-               if (!next_trans[i]->ev(skb)) {
-                       rc = next_trans[i];
-                       break;
-               }
-       return rc;
-}
-
-/**
- *     llc_station_free_ev - frees an event
- *     @skb: Address of the event
- *
- *     Frees an event.
- */
-static void llc_station_free_ev(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       if (ev->type == LLC_STATION_EV_TYPE_PDU)
-               kfree_skb(skb);
-}
-
-/**
- *     llc_station_next_state - processes event and goes to the next state
- *     @skb: Address of the event
- *
- *     Processes an event, executes any transitions related to that event and
- *     updates the state of the station.
- */
-static u16 llc_station_next_state(struct sk_buff *skb)
-{
-       u16 rc = 1;
-       struct llc_station_state_trans *trans;
-
-       if (llc_main_station.state > LLC_NBR_STATION_STATES)
-               goto out;
-       trans = llc_find_station_trans(skb);
-       if (trans) {
-               /* got the state to which we next transition; perform the
-                * actions associated with this transition before actually
-                * transitioning to the next state
-                */
-               rc = llc_exec_station_trans_actions(trans, skb);
-               if (!rc)
-                       /* transition station to next state if all actions
-                        * execute successfully; done; wait for next event
-                        */
-                       llc_main_station.state = trans->next_state;
-       } else
-               /* event not recognized in current state; re-queue it for
-                * processing again at a later time; return failure
-                */
-               rc = 0;
-out:
-       llc_station_free_ev(skb);
-       return rc;
-}
-
-/**
- *     llc_station_service_events - service events in the queue
- *
- *     Get an event from the station event queue (if any); attempt to service
- *     the event; if event serviced, get the next event (if any) on the event
- *     queue; if event not service, re-queue the event on the event queue and
- *     attempt to service the next event; when serviced all events in queue,
- *     finished; if don't transition to different state, just service all
- *     events once; if transition to new state, service all events again.
- *     Caller must hold llc_main_station.ev_q.lock.
- */
-static void llc_station_service_events(void)
-{
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&llc_main_station.ev_q.list)) != NULL)
-               llc_station_next_state(skb);
-}
-
-/**
- *     llc_station_state_process - queue event and try to process queue.
- *     @skb: Address of the event
- *
- *     Queues an event (on the station event queue) for handling by the
- *     station state machine and attempts to process any queued-up events.
- */
-static void llc_station_state_process(struct sk_buff *skb)
-{
-       spin_lock_bh(&llc_main_station.ev_q.lock);
-       skb_queue_tail(&llc_main_station.ev_q.list, skb);
-       llc_station_service_events();
-       spin_unlock_bh(&llc_main_station.ev_q.lock);
-}
-
-static void llc_station_ack_tmr_cb(unsigned long timeout_data)
-{
-       struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
-
-       if (skb) {
-               struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-               ev->type = LLC_STATION_EV_TYPE_ACK_TMR;
-               llc_station_state_process(skb);
-       }
-}
-
 /**
  *     llc_station_rcv - send received pdu to the station state machine
  *     @skb: received frame.
@@ -680,24 +107,15 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
  */
 static void llc_station_rcv(struct sk_buff *skb)
 {
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       ev->type   = LLC_STATION_EV_TYPE_PDU;
-       ev->reason = 0;
-       llc_station_state_process(skb);
+       if (llc_stat_ev_rx_null_dsap_xid_c(skb))
+               llc_station_ac_send_xid_r(skb);
+       else if (llc_stat_ev_rx_null_dsap_test_c(skb))
+               llc_station_ac_send_test_r(skb);
+       kfree_skb(skb);
 }
 
 void __init llc_station_init(void)
 {
-       skb_queue_head_init(&llc_main_station.mac_pdu_q);
-       skb_queue_head_init(&llc_main_station.ev_q.list);
-       spin_lock_init(&llc_main_station.ev_q.lock);
-       setup_timer(&llc_main_station.ack_timer, llc_station_ack_tmr_cb,
-                       (unsigned long)&llc_main_station);
-       llc_main_station.ack_timer.expires  = jiffies +
-                                               sysctl_llc_station_ack_timeout;
-       llc_main_station.maximum_retry  = 1;
-       llc_main_station.state          = LLC_STATION_STATE_UP;
        llc_set_station_handler(llc_station_rcv);
 }
 
index d75306b9c2f3e80d5fe0ed90d511d646b8a0728c..612a5ddaf93b1ab1b5a524c5efef8d6b1f769038 100644 (file)
@@ -47,13 +47,6 @@ static struct ctl_table llc2_timeout_table[] = {
 };
 
 static struct ctl_table llc_station_table[] = {
-       {
-               .procname       = "ack_timeout",
-               .data           = &sysctl_llc_station_ack_timeout,
-               .maxlen         = sizeof(long),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
        { },
 };
 
index 8dfd70d8fcfbcce247670d102a21a4d083cb8276..a04752e910239821b1bc3110d078c8d81349d4e2 100644 (file)
@@ -38,14 +38,10 @@ static void gf_mulx(u8 *pad)
 static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
                                const u8 *addr[], const size_t *len, u8 *mac)
 {
-       u8 scratch[2 * AES_BLOCK_SIZE];
-       u8 *cbc, *pad;
+       u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
        const u8 *pos, *end;
        size_t i, e, left, total_len;
 
-       cbc = scratch;
-       pad = scratch + AES_BLOCK_SIZE;
-
        memset(cbc, 0, AES_BLOCK_SIZE);
 
        total_len = 0;
index d0deb3edae21fe4a1fc3cbbc7773e742945197f4..3195a6307f50eeb5e6715a6db0fbecc4fee2f4d5 100644 (file)
@@ -869,7 +869,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
        } else {
                ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
-                                               true);
+                                               false);
        }
 
  out:
index a58c0b649ba137b09214c031bf3508b5fe2974eb..05f3a313db8852b36c677cad188fd7154d6564ef 100644 (file)
@@ -20,7 +20,8 @@
 #include "rate.h"
 #include "mesh.h"
 
-static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, char *name,
+static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
+                                               const char *name,
                                                enum nl80211_iftype type,
                                                u32 *flags,
                                                struct vif_params *params)
@@ -102,6 +103,18 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_start_p2p_device(struct wiphy *wiphy,
+                                     struct wireless_dev *wdev)
+{
+       return ieee80211_do_open(wdev, true);
+}
+
+static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
+                                     struct wireless_dev *wdev)
+{
+       ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
+}
+
 static int ieee80211_set_noack_map(struct wiphy *wiphy,
                                  struct net_device *dev,
                                  u16 noack_map)
@@ -158,6 +171,38 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                }
        }
 
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_STATION:
+               if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
+                       key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+               break;
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_AP_VLAN:
+               /* Keys without a station are used for TX only */
+               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+                       key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               /* no MFP (yet) */
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+#ifdef CONFIG_MAC80211_MESH
+               if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+                       key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+               break;
+#endif
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_P2P_DEVICE:
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NUM_NL80211_IFTYPES:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               /* shouldn't happen */
+               WARN_ON_ONCE(1);
+               break;
+       }
+
        err = ieee80211_key_link(key, sdata, sta);
        if (err)
                ieee80211_key_free(sdata->local, key);
@@ -330,7 +375,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
        if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
                struct ieee80211_supported_band *sband;
                sband = sta->local->hw.wiphy->bands[
-                               sta->local->hw.conf.channel->band];
+                               sta->local->oper_channel->band];
                rate->legacy = sband->bitrates[idx].bitrate;
        } else
                rate->mcs = idx;
@@ -725,25 +770,23 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
                                    const u8 *resp, size_t resp_len)
 {
-       struct sk_buff *new, *old;
+       struct probe_resp *new, *old;
 
        if (!resp || !resp_len)
                return 1;
 
        old = rtnl_dereference(sdata->u.ap.probe_resp);
 
-       new = dev_alloc_skb(resp_len);
+       new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
-       memcpy(skb_put(new, resp_len), resp, resp_len);
+       new->len = resp_len;
+       memcpy(new->data, resp, resp_len);
 
        rcu_assign_pointer(sdata->u.ap.probe_resp, new);
-       if (old) {
-               /* TODO: use call_rcu() */
-               synchronize_rcu();
-               dev_kfree_skb(old);
-       }
+       if (old)
+               kfree_rcu(old, rcu_head);
 
        return 0;
 }
@@ -950,7 +993,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
        /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
         * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
 
-       memset(msg->da, 0xff, ETH_ALEN);
+       eth_broadcast_addr(msg->da);
        memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
        msg->len = htons(6);
        msg->dsap = 0;
@@ -1285,9 +1328,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
        mutex_unlock(&local->sta_mtx);
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
                ieee80211_recalc_ps(local, -1);
-
+               ieee80211_recalc_ps_vif(sdata);
+       }
        return 0;
 }
 
@@ -1660,7 +1704,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
        }
 
        if (!sdata->vif.bss_conf.use_short_slot &&
-           sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+           sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
                sdata->vif.bss_conf.use_short_slot = true;
                changed |= BSS_CHANGED_ERP_SLOT;
        }
@@ -1774,6 +1818,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->local->ops->hw_scan)
@@ -1926,7 +1971,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                                  enum nl80211_tx_power_setting type, int mbm)
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
-       struct ieee80211_channel *chan = local->hw.conf.channel;
+       struct ieee80211_channel *chan = local->oper_channel;
        u32 changes = 0;
 
        switch (type) {
@@ -2026,9 +2071,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
         */
        if (!sdata->u.mgd.associated ||
            sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) {
-               mutex_lock(&sdata->local->iflist_mtx);
                ieee80211_recalc_smps(sdata->local);
-               mutex_unlock(&sdata->local->iflist_mtx);
                return 0;
        }
 
@@ -2078,6 +2121,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
        ieee80211_recalc_ps(local, -1);
+       ieee80211_recalc_ps_vif(sdata);
 
        return 0;
 }
@@ -2460,6 +2504,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                if (!sdata->u.mgd.associated)
                        need_offchan = true;
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               need_offchan = true;
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -2652,6 +2699,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                               u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_tdls_data *tf;
 
        tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2671,8 +2719,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                tf->u.setup_req.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        case WLAN_TDLS_SETUP_RESPONSE:
@@ -2685,8 +2735,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                tf->u.setup_resp.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        case WLAN_TDLS_SETUP_CONFIRM:
@@ -2724,6 +2776,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
                           u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_mgmt *mgmt;
 
        mgmt = (void *)skb_put(skb, 24);
@@ -2746,8 +2799,10 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
                mgmt->u.action.u.tdls_discover_resp.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        default:
@@ -3004,6 +3059,8 @@ struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
        .change_virtual_intf = ieee80211_change_iface,
+       .start_p2p_device = ieee80211_start_p2p_device,
+       .stop_p2p_device = ieee80211_stop_p2p_device,
        .add_key = ieee80211_add_key,
        .del_key = ieee80211_del_key,
        .get_key = ieee80211_get_key,
index f0f87e5a1d354eef6a705deba7a15c21b491093f..0bfc914ddd1504d16a2ebf8ad74655d6deb60e53 100644 (file)
@@ -68,16 +68,14 @@ ieee80211_get_channel_mode(struct ieee80211_local *local,
        return mode;
 }
 
-bool ieee80211_set_channel_type(struct ieee80211_local *local,
-                               struct ieee80211_sub_if_data *sdata,
-                               enum nl80211_channel_type chantype)
+static enum nl80211_channel_type
+ieee80211_get_superchan(struct ieee80211_local *local,
+                       struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_sub_if_data *tmp;
        enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
-       bool result;
+       struct ieee80211_sub_if_data *tmp;
 
        mutex_lock(&local->iflist_mtx);
-
        list_for_each_entry(tmp, &local->interfaces, list) {
                if (tmp == sdata)
                        continue;
@@ -103,39 +101,70 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
                        break;
                }
        }
+       mutex_unlock(&local->iflist_mtx);
 
-       switch (superchan) {
+       return superchan;
+}
+
+static bool
+ieee80211_channel_types_are_compatible(enum nl80211_channel_type chantype1,
+                                      enum nl80211_channel_type chantype2,
+                                      enum nl80211_channel_type *compat)
+{
+       /*
+        * start out with chantype1 being the result,
+        * overwriting later if needed
+        */
+       if (compat)
+               *compat = chantype1;
+
+       switch (chantype1) {
        case NL80211_CHAN_NO_HT:
+               if (compat)
+                       *compat = chantype2;
+               break;
        case NL80211_CHAN_HT20:
                /*
                 * allow any change that doesn't go to no-HT
                 * (if it already is no-HT no change is needed)
                 */
-               if (chantype == NL80211_CHAN_NO_HT)
+               if (chantype2 == NL80211_CHAN_NO_HT)
                        break;
-               superchan = chantype;
+               if (compat)
+                       *compat = chantype2;
                break;
        case NL80211_CHAN_HT40PLUS:
        case NL80211_CHAN_HT40MINUS:
                /* allow smaller bandwidth and same */
-               if (chantype == NL80211_CHAN_NO_HT)
+               if (chantype2 == NL80211_CHAN_NO_HT)
                        break;
-               if (chantype == NL80211_CHAN_HT20)
+               if (chantype2 == NL80211_CHAN_HT20)
                        break;
-               if (superchan == chantype)
+               if (chantype2 == chantype1)
                        break;
-               result = false;
-               goto out;
+               return false;
        }
 
-       local->_oper_channel_type = superchan;
+       return true;
+}
+
+bool ieee80211_set_channel_type(struct ieee80211_local *local,
+                               struct ieee80211_sub_if_data *sdata,
+                               enum nl80211_channel_type chantype)
+{
+       enum nl80211_channel_type superchan;
+       enum nl80211_channel_type compatchan;
+
+       superchan = ieee80211_get_superchan(local, sdata);
+       if (!ieee80211_channel_types_are_compatible(superchan, chantype,
+                                                   &compatchan))
+               return false;
+
+       local->_oper_channel_type = compatchan;
 
        if (sdata)
                sdata->vif.bss_conf.channel_type = chantype;
 
-       result = true;
- out:
-       mutex_unlock(&local->iflist_mtx);
+       return true;
 
-       return result;
 }
index b8dfb440c8ef1ff903e3359e35b041ea9093d358..466f4b45dd94fdf72517d60a5a71e5ba8bbc9daf 100644 (file)
@@ -63,8 +63,6 @@ DEBUGFS_READONLY_FILE(user_power, "%d",
                      local->user_power_level);
 DEBUGFS_READONLY_FILE(power, "%d",
                      local->hw.conf.power_level);
-DEBUGFS_READONLY_FILE(frequency, "%d",
-                     local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
                      local->total_ps_buffered);
 DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -72,6 +70,7 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
 DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
        local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
 
+#ifdef CONFIG_PM
 static ssize_t reset_write(struct file *file, const char __user *user_buf,
                           size_t count, loff_t *ppos)
 {
@@ -90,33 +89,7 @@ static const struct file_operations reset_ops = {
        .open = simple_open,
        .llseek = noop_llseek,
 };
-
-static ssize_t channel_type_read(struct file *file, char __user *user_buf,
-                      size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       const char *buf;
-
-       switch (local->hw.conf.channel_type) {
-       case NL80211_CHAN_NO_HT:
-               buf = "no ht\n";
-               break;
-       case NL80211_CHAN_HT20:
-               buf = "ht20\n";
-               break;
-       case NL80211_CHAN_HT40MINUS:
-               buf = "ht40-\n";
-               break;
-       case NL80211_CHAN_HT40PLUS:
-               buf = "ht40+\n";
-               break;
-       default:
-               buf = "???";
-               break;
-       }
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
+#endif
 
 static ssize_t hwflags_read(struct file *file, char __user *user_buf,
                            size_t count, loff_t *ppos)
@@ -205,7 +178,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
 }
 
 DEBUGFS_READONLY_FILE_OPS(hwflags);
-DEBUGFS_READONLY_FILE_OPS(channel_type);
 DEBUGFS_READONLY_FILE_OPS(queues);
 
 /* statistics stuff */
@@ -272,12 +244,12 @@ void debugfs_hw_add(struct ieee80211_local *local)
 
        local->debugfs.keys = debugfs_create_dir("keys", phyd);
 
-       DEBUGFS_ADD(frequency);
        DEBUGFS_ADD(total_ps_buffered);
        DEBUGFS_ADD(wep_iv);
        DEBUGFS_ADD(queues);
+#ifdef CONFIG_PM
        DEBUGFS_ADD_MODE(reset, 0200);
-       DEBUGFS_ADD(channel_type);
+#endif
        DEBUGFS_ADD(hwflags);
        DEBUGFS_ADD(user_power);
        DEBUGFS_ADD(power);
index df9203199102911d23626bd3ecfeb8b72066dc93..da9003b20004227b76f5ad89f04ad4445bf7d0ec 100644 (file)
@@ -9,7 +9,7 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
        WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
             "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
-            sdata->dev->name, sdata->flags);
+            sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
        return sdata;
 }
 
-static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local,
+                         struct ieee80211_tx_control *control,
+                         struct sk_buff *skb)
 {
-       local->ops->tx(&local->hw, skb);
+       local->ops->tx(&local->hw, control, skb);
 }
 
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
@@ -526,6 +528,9 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
        sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
+       WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+               sdata->vif.type != NL80211_IFTYPE_ADHOC);
+
        trace_drv_sta_rc_update(local, sdata, sta, changed);
        if (local->ops->sta_rc_update)
                local->ops->sta_rc_update(&local->hw, &sdata->vif,
index 5746d62faba1956d5a8690726417275cc4a70ff5..5f3620f0bc0a651257aa53e28b91c4b2114be637 100644 (file)
@@ -109,7 +109,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_PROBE_RESP);
-       memset(mgmt->da, 0xff, ETH_ALEN);
+       eth_broadcast_addr(mgmt->da);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
        mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        mod_timer(&ifibss->timer,
                  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-       bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+       bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
                                        mgmt, skb->len, 0, GFP_KERNEL);
        cfg80211_put_bss(bss);
        netif_carrier_on(sdata->dev);
@@ -278,7 +278,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
        if (auth && !sdata->u.ibss.auth_frame_registrations) {
                ibss_dbg(sdata,
                         "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
-                        sdata->vif.addr, sdata->u.ibss.bssid, addr);
+                        sdata->vif.addr, addr, sdata->u.ibss.bssid);
                ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
                                    addr, sdata->u.ibss.bssid, NULL, 0, 0);
        }
@@ -294,7 +294,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
-       int band = local->hw.conf.channel->band;
+       int band = local->oper_channel->band;
 
        /*
         * XXX: Consider removing the least recently used entry and
@@ -332,11 +332,27 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        return ieee80211_ibss_finish_sta(sta, auth);
 }
 
+static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
+                                         struct ieee80211_mgmt *mgmt,
+                                         size_t len)
+{
+       u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code);
+
+       if (len < IEEE80211_DEAUTH_FRAME_LEN)
+               return;
+
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
+                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       sta_info_destroy_addr(sdata, mgmt->sa);
+}
+
 static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
                                        struct ieee80211_mgmt *mgmt,
                                        size_t len)
 {
        u16 auth_alg, auth_transaction;
+       struct sta_info *sta;
+       u8 deauth_frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        lockdep_assert_held(&sdata->u.ibss.mtx);
 
@@ -352,9 +368,21 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
                 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
                 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
        sta_info_destroy_addr(sdata, mgmt->sa);
-       ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
+       sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
        rcu_read_unlock();
 
+       /*
+        * if we have any problem in allocating the new station, we reply with a
+        * DEAUTH frame to tell the other end that we had a problem
+        */
+       if (!sta) {
+               ieee80211_send_deauth_disassoc(sdata, sdata->u.ibss.bssid,
+                                              IEEE80211_STYPE_DEAUTH,
+                                              WLAN_REASON_UNSPECIFIED, true,
+                                              deauth_frame_buf);
+               return;
+       }
+
        /*
         * IEEE 802.11 standard does not require authentication in IBSS
         * networks and most implementations do not seem to use it.
@@ -459,8 +487,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                        }
                }
 
-               if (sta && rates_updated)
+               if (sta && rates_updated) {
+                       drv_sta_rc_update(local, sdata, &sta->sta,
+                                         IEEE80211_RC_SUPP_RATES_CHANGED);
                        rate_control_rate_init(sta);
+               }
 
                rcu_read_unlock();
        }
@@ -561,7 +592,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
-       int band = local->hw.conf.channel->band;
+       int band = local->oper_channel->band;
 
        /*
         * XXX: Consider removing the least recently used entry and
@@ -759,7 +790,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                                return;
                        }
                        sdata_info(sdata, "IBSS not allowed on %d MHz\n",
-                                  local->hw.conf.channel->center_freq);
+                                  local->oper_channel->center_freq);
 
                        /* No IBSS found - decrease scan interval and continue
                         * scanning. */
@@ -899,6 +930,9 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        case IEEE80211_STYPE_AUTH:
                ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
                break;
+       case IEEE80211_STYPE_DEAUTH:
+               ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
+               break;
        }
 
  mgmt_out:
index bb61f7718c4c52521c555dbc3ae9468c5510151e..8c804550465b37857d6dc50b082881ec5bd4ac35 100644 (file)
@@ -68,6 +68,8 @@ struct ieee80211_local;
 #define IEEE80211_DEFAULT_MAX_SP_LEN           \
        IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
 
+#define IEEE80211_DEAUTH_FRAME_LEN     (24 /* hdr */ + 2 /* reason */)
+
 struct ieee80211_fragment_entry {
        unsigned long first_frag_time;
        unsigned int seq;
@@ -193,8 +195,6 @@ struct ieee80211_tx_data {
        struct sta_info *sta;
        struct ieee80211_key *key;
 
-       struct ieee80211_channel *channel;
-
        unsigned int flags;
 };
 
@@ -274,9 +274,15 @@ struct beacon_data {
        struct rcu_head rcu_head;
 };
 
+struct probe_resp {
+       struct rcu_head rcu_head;
+       int len;
+       u8 data[0];
+};
+
 struct ieee80211_if_ap {
        struct beacon_data __rcu *beacon;
-       struct sk_buff __rcu *probe_resp;
+       struct probe_resp __rcu *probe_resp;
 
        struct list_head vlans;
 
@@ -359,6 +365,7 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_NULLFUNC_ACKED    = BIT(8),
        IEEE80211_STA_RESET_SIGNAL_AVE  = BIT(9),
        IEEE80211_STA_DISABLE_40MHZ     = BIT(10),
+       IEEE80211_STA_DISABLE_VHT       = BIT(11),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -406,6 +413,7 @@ struct ieee80211_if_managed {
        struct work_struct monitor_work;
        struct work_struct chswitch_work;
        struct work_struct beacon_connection_loss_work;
+       struct work_struct csa_connection_drop_work;
 
        unsigned long beacon_timeout;
        unsigned long probe_timeout;
@@ -965,7 +973,6 @@ struct ieee80211_local {
        int scan_channel_idx;
        int scan_ies_len;
 
-       struct ieee80211_sched_scan_ies sched_scan_ies;
        struct work_struct sched_scan_stopped_work;
        struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
 
@@ -1052,7 +1059,7 @@ struct ieee80211_local {
        bool disable_dynamic_ps;
 
        int user_power_level; /* in dBm */
-       int power_constr_level; /* in dBm */
+       int ap_power_level; /* in dBm */
 
        enum ieee80211_smps_mode smps_mode;
 
@@ -1075,6 +1082,8 @@ struct ieee80211_local {
        struct idr ack_status_frames;
        spinlock_t ack_status_lock;
 
+       struct ieee80211_sub_if_data __rcu *p2p_sdata;
+
        /* dummy netdev for use w/ NAPI */
        struct net_device napi_dev;
 
@@ -1131,7 +1140,7 @@ struct ieee802_11_elems {
        u8 *prep;
        u8 *perr;
        struct ieee80211_rann_ie *rann;
-       u8 *ch_switch_elem;
+       struct ieee80211_channel_sw_ie *ch_switch_ie;
        u8 *country_elem;
        u8 *pwr_constr_elem;
        u8 *quiet_elem; /* first quite element */
@@ -1157,9 +1166,7 @@ struct ieee802_11_elems {
        u8 preq_len;
        u8 prep_len;
        u8 perr_len;
-       u8 ch_switch_elem_len;
        u8 country_elem_len;
-       u8 pwr_constr_elem_len;
        u8 quiet_elem_len;
        u8 num_of_quiet_elem;   /* can be more the one */
        u8 timeout_int_len;
@@ -1202,6 +1209,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_pspoll(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
 int ieee80211_max_network_latency(struct notifier_block *nb,
                                  unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1299,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
 void ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
                                    const int offset);
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1358,7 +1368,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
 int ieee80211_reconfig(struct ieee80211_local *local);
 void ieee80211_stop_device(struct ieee80211_local *local);
 
-#ifdef CONFIG_PM
 int __ieee80211_suspend(struct ieee80211_hw *hw,
                        struct cfg80211_wowlan *wowlan);
 
@@ -1372,18 +1381,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 
        return ieee80211_reconfig(hw_to_local(hw));
 }
-#else
-static inline int __ieee80211_suspend(struct ieee80211_hw *hw,
-                                     struct cfg80211_wowlan *wowlan)
-{
-       return 0;
-}
-
-static inline int __ieee80211_resume(struct ieee80211_hw *hw)
-{
-       return 0;
-}
-#endif
 
 /* utility functions/constants */
 extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -1425,7 +1422,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr, bool ack);
-void ieee80211_beacon_connection_loss_work(struct work_struct *work);
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
                                     enum queue_stop_reason reason);
@@ -1451,19 +1447,24 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg,
                         u8 *extra, size_t extra_len, const u8 *bssid,
                         const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
+void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
+                                   const u8 *bssid, u16 stype, u16 reason,
+                                   bool send_frame, u8 *frame_buf);
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             const u8 *ie, size_t ie_len,
                             enum ieee80211_band band, u32 rate_mask,
                             u8 channel);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
+                                         struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed);
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck);
+                             u32 ratemask, bool directed, bool no_cck,
+                             struct ieee80211_channel *channel);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
                                  const size_t supp_rates_len,
@@ -1487,9 +1488,11 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-                           struct sk_buff *skb, bool need_basic);
+                           struct sk_buff *skb, bool need_basic,
+                           enum ieee80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-                               struct sk_buff *skb, bool need_basic);
+                               struct sk_buff *skb, bool need_basic,
+                               enum ieee80211_band band);
 
 /* channel management */
 enum ieee80211_chan_mode {
index bfb57dcc15381a53cdcc3768e943a6fb144b8c3a..6f8a73c64fb31bde831609fa88a9b4e0425f02b9 100644 (file)
@@ -100,6 +100,10 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
                        sdata->vif.bss_conf.idle = true;
                        continue;
                }
+
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                /* count everything else */
                sdata->vif.bss_conf.idle = false;
                count++;
@@ -121,7 +125,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+                   sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
                        continue;
                if (sdata->old_idle == sdata->vif.bss_conf.idle)
                        continue;
@@ -204,6 +209,8 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
 {
        return type1 == NL80211_IFTYPE_MONITOR ||
                type2 == NL80211_IFTYPE_MONITOR ||
+               type1 == NL80211_IFTYPE_P2P_DEVICE ||
+               type2 == NL80211_IFTYPE_P2P_DEVICE ||
                (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
                (type1 == NL80211_IFTYPE_WDS &&
                        (type2 == NL80211_IFTYPE_WDS ||
@@ -271,13 +278,15 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
        int n_queues = sdata->local->hw.queues;
        int i;
 
-       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-               if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
-                                IEEE80211_INVAL_HW_QUEUE))
-                       return -EINVAL;
-               if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
-                                n_queues))
-                       return -EINVAL;
+       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
+               for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+                       if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
+                                        IEEE80211_INVAL_HW_QUEUE))
+                               return -EINVAL;
+                       if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
+                                        n_queues))
+                               return -EINVAL;
+               }
        }
 
        if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
@@ -406,9 +415,10 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
  * an error on interface type changes that have been pre-checked, so most
  * checks should be in ieee80211_check_concurrent_iface.
  */
-static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+       struct net_device *dev = wdev->netdev;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
@@ -443,6 +453,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_P2P_DEVICE:
                /* no special treatment */
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +482,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
         * Copy the hopefully now-present MAC address to
         * this interface, if it has the special null one.
         */
-       if (is_zero_ether_addr(dev->dev_addr)) {
+       if (dev && is_zero_ether_addr(dev->dev_addr)) {
                memcpy(dev->dev_addr,
                       local->hw.wiphy->perm_addr,
                       ETH_ALEN);
@@ -536,15 +547,23 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                        local->fif_probe_req++;
                }
 
-               changed |= ieee80211_reset_erp_info(sdata);
+               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+                       changed |= ieee80211_reset_erp_info(sdata);
                ieee80211_bss_info_change_notify(sdata, changed);
 
-               if (sdata->vif.type == NL80211_IFTYPE_STATION ||
-                   sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-                   sdata->vif.type == NL80211_IFTYPE_AP)
+               switch (sdata->vif.type) {
+               case NL80211_IFTYPE_STATION:
+               case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_AP:
+               case NL80211_IFTYPE_MESH_POINT:
                        netif_carrier_off(dev);
-               else
+                       break;
+               case NL80211_IFTYPE_WDS:
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       break;
+               default:
                        netif_carrier_on(dev);
+               }
 
                /*
                 * set default queue parameters so drivers don't
@@ -576,6 +595,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                }
 
                rate_control_rate_init(sta);
+               netif_carrier_on(dev);
+       } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+               rcu_assign_pointer(local->p2p_sdata, sdata);
        }
 
        /*
@@ -601,7 +623,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 
        ieee80211_recalc_ps(local, -1);
 
-       netif_tx_start_all_queues(dev);
+       if (dev)
+               netif_tx_start_all_queues(dev);
 
        return 0;
  err_del_interface:
@@ -631,7 +654,7 @@ static int ieee80211_open(struct net_device *dev)
        if (err)
                return err;
 
-       return ieee80211_do_open(dev, true);
+       return ieee80211_do_open(&sdata->wdev, true);
 }
 
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +675,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        /*
         * Stop TX on this interface first.
         */
-       netif_tx_stop_all_queues(sdata->dev);
+       if (sdata->dev)
+               netif_tx_stop_all_queues(sdata->dev);
 
        ieee80211_roc_purge(sdata);
 
@@ -691,14 +715,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                local->fif_probe_req--;
        }
 
-       netif_addr_lock_bh(sdata->dev);
-       spin_lock_bh(&local->filter_lock);
-       __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
-                        sdata->dev->addr_len);
-       spin_unlock_bh(&local->filter_lock);
-       netif_addr_unlock_bh(sdata->dev);
+       if (sdata->dev) {
+               netif_addr_lock_bh(sdata->dev);
+               spin_lock_bh(&local->filter_lock);
+               __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+                                sdata->dev->addr_len);
+               spin_unlock_bh(&local->filter_lock);
+               netif_addr_unlock_bh(sdata->dev);
 
-       ieee80211_configure_filter(local);
+               ieee80211_configure_filter(local);
+       }
 
        del_timer_sync(&local->dynamic_ps_timer);
        cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +734,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_sub_if_data *vlan, *tmpsdata;
                struct beacon_data *old_beacon =
                        rtnl_dereference(sdata->u.ap.beacon);
-               struct sk_buff *old_probe_resp =
+               struct probe_resp *old_probe_resp =
                        rtnl_dereference(sdata->u.ap.probe_resp);
 
                /* sdata_running will return false, so this will disable */
@@ -720,7 +746,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
                synchronize_rcu();
                kfree(old_beacon);
-               kfree_skb(old_probe_resp);
+               kfree(old_probe_resp);
 
                /* down all dependent devices, that is VLANs */
                list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,24 +785,29 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_adjust_monitor_flags(sdata, -1);
                ieee80211_configure_filter(local);
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               /* relies on synchronize_rcu() below */
+               rcu_assign_pointer(local->p2p_sdata, NULL);
+               /* fall through */
        default:
                flush_work(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
-                * Call synchronize_rcu() to wait for the RX path
+                * Call rcu_barrier() to wait both for the RX path
                 * should it be using the interface and enqueuing
-                * frames at this very time on another CPU.
+                * frames at this very time on another CPU, and
+                * for the sta free call_rcu callbacks.
                 */
-               synchronize_rcu();
-               skb_queue_purge(&sdata->skb_queue);
+               rcu_barrier();
 
                /*
-                * Disable beaconing here for mesh only, AP and IBSS
-                * are already taken care of.
+                * free_sta_rcu() enqueues a work for the actual
+                * sta cleanup, so we need to flush it while
+                * sdata is still valid.
                 */
-               if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-                       ieee80211_bss_info_change_notify(sdata,
-                               BSS_CHANGED_BEACON_ENABLED);
+               flush_workqueue(local->workqueue);
+
+               skb_queue_purge(&sdata->skb_queue);
 
                /*
                 * Free all remaining keys, there shouldn't be any,
@@ -877,9 +908,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
  * Called when the netdev is removed or, by the code below, before
  * the interface type changes.
  */
-static void ieee80211_teardown_sdata(struct net_device *dev)
+static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        int flushed;
        int i;
@@ -900,6 +930,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
        WARN_ON(flushed);
 }
 
+static void ieee80211_uninit(struct net_device *dev)
+{
+       ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                         struct sk_buff *skb)
 {
@@ -909,7 +944,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_dataif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
-       .ndo_uninit             = ieee80211_teardown_sdata,
+       .ndo_uninit             = ieee80211_uninit,
        .ndo_start_xmit         = ieee80211_subif_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
@@ -940,7 +975,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_monitorif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
-       .ndo_uninit             = ieee80211_teardown_sdata,
+       .ndo_uninit             = ieee80211_uninit,
        .ndo_start_xmit         = ieee80211_monitor_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
@@ -1099,7 +1134,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        /* and set some type-dependent values */
        sdata->vif.type = type;
        sdata->vif.p2p = false;
-       sdata->dev->netdev_ops = &ieee80211_dataif_ops;
        sdata->wdev.iftype = type;
 
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1141,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 
        sdata->noack_map = 0;
 
-       /* only monitor differs */
-       sdata->dev->type = ARPHRD_ETHER;
+       /* only monitor/p2p-device differ */
+       if (sdata->dev) {
+               sdata->dev->netdev_ops = &ieee80211_dataif_ops;
+               sdata->dev->type = ARPHRD_ETHER;
+       }
 
        skb_queue_head_init(&sdata->skb_queue);
        INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1183,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
                break;
        case NL80211_IFTYPE_WDS:
        case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
@@ -1156,18 +1194,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        ieee80211_debugfs_add_netdev(sdata);
 }
 
-static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
-{
-       switch (sdata->vif.type) {
-       case NL80211_IFTYPE_MESH_POINT:
-               mesh_path_flush_by_iface(sdata);
-               break;
-
-       default:
-               break;
-       }
-}
-
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
                                           enum nl80211_iftype type)
 {
@@ -1225,7 +1251,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_do_stop(sdata, false);
 
-       ieee80211_teardown_sdata(sdata->dev);
+       ieee80211_teardown_sdata(sdata);
 
        ret = drv_change_interface(local, sdata, internal_type, p2p);
        if (ret)
@@ -1240,7 +1266,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_setup_sdata(sdata, type);
 
-       err = ieee80211_do_open(sdata->dev, false);
+       err = ieee80211_do_open(&sdata->wdev, false);
        WARN(err, "type change: do_open returned %d", err);
 
        return ret;
@@ -1267,14 +1293,14 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
                        return ret;
        } else {
                /* Purge and reset type-dependent state. */
-               ieee80211_teardown_sdata(sdata->dev);
+               ieee80211_teardown_sdata(sdata);
                ieee80211_setup_sdata(sdata, type);
        }
 
        /* reset some values that shouldn't be kept across type changes */
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(sdata->local,
-                       sdata->local->hw.conf.channel->band);
+                       sdata->local->oper_channel->band);
        sdata->drop_unencrypted = 0;
        if (type == NL80211_IFTYPE_STATION)
                sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1309,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 }
 
 static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
-                                      struct net_device *dev,
-                                      enum nl80211_iftype type)
+                                      u8 *perm_addr, enum nl80211_iftype type)
 {
        struct ieee80211_sub_if_data *sdata;
        u64 mask, start, addr, val, inc;
@@ -1293,13 +1318,12 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
        int i;
 
        /* default ... something at least */
-       memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+       memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
 
        if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
            local->hw.wiphy->n_addresses <= 1)
                return;
 
-
        mutex_lock(&local->iflist_mtx);
 
        switch (type) {
@@ -1312,11 +1336,24 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                list_for_each_entry(sdata, &local->interfaces, list) {
                        if (sdata->vif.type != NL80211_IFTYPE_AP)
                                continue;
-                       memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+                       memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
                        break;
                }
                /* keep default if no AP interface present */
                break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+                       list_for_each_entry(sdata, &local->interfaces, list) {
+                               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+                                       continue;
+                               if (!ieee80211_sdata_running(sdata))
+                                       continue;
+                               memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
+                               goto out_unlock;
+                       }
+               }
+               /* otherwise fall through */
        default:
                /* assign a new address if possible -- try n_addresses first */
                for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1368,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                        }
 
                        if (!used) {
-                               memcpy(dev->perm_addr,
+                               memcpy(perm_addr,
                                       local->hw.wiphy->addresses[i].addr,
                                       ETH_ALEN);
                                break;
@@ -1382,7 +1419,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                        }
 
                        if (!used) {
-                               memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+                               memcpy(perm_addr, tmp_addr, ETH_ALEN);
                                break;
                        }
                        addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1428,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                break;
        }
 
+ out_unlock:
        mutex_unlock(&local->iflist_mtx);
 }
 
@@ -1398,49 +1436,68 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
 {
-       struct net_device *ndev;
+       struct net_device *ndev = NULL;
        struct ieee80211_sub_if_data *sdata = NULL;
        int ret, i;
        int txqs = 1;
 
        ASSERT_RTNL();
 
-       if (local->hw.queues >= IEEE80211_NUM_ACS)
-               txqs = IEEE80211_NUM_ACS;
-
-       ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-                               name, ieee80211_if_setup, txqs, 1);
-       if (!ndev)
-               return -ENOMEM;
-       dev_net_set(ndev, wiphy_net(local->hw.wiphy));
-
-       ndev->needed_headroom = local->tx_headroom +
-                               4*6 /* four MAC addresses */
-                               + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
-                               + 6 /* mesh */
-                               + 8 /* rfc1042/bridge tunnel */
-                               - ETH_HLEN /* ethernet hard_header_len */
-                               + IEEE80211_ENCRYPT_HEADROOM;
-       ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
-
-       ret = dev_alloc_name(ndev, ndev->name);
-       if (ret < 0)
-               goto fail;
-
-       ieee80211_assign_perm_addr(local, ndev, type);
-       memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
-       SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
-
-       /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
-       sdata = netdev_priv(ndev);
-       ndev->ieee80211_ptr = &sdata->wdev;
-       memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
-       memcpy(sdata->name, ndev->name, IFNAMSIZ);
+       if (type == NL80211_IFTYPE_P2P_DEVICE) {
+               struct wireless_dev *wdev;
+
+               sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
+                               GFP_KERNEL);
+               if (!sdata)
+                       return -ENOMEM;
+               wdev = &sdata->wdev;
+
+               sdata->dev = NULL;
+               strlcpy(sdata->name, name, IFNAMSIZ);
+               ieee80211_assign_perm_addr(local, wdev->address, type);
+               memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+       } else {
+               if (local->hw.queues >= IEEE80211_NUM_ACS)
+                       txqs = IEEE80211_NUM_ACS;
+
+               ndev = alloc_netdev_mqs(sizeof(*sdata) +
+                                       local->hw.vif_data_size,
+                                       name, ieee80211_if_setup, txqs, 1);
+               if (!ndev)
+                       return -ENOMEM;
+               dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+
+               ndev->needed_headroom = local->tx_headroom +
+                                       4*6 /* four MAC addresses */
+                                       + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+                                       + 6 /* mesh */
+                                       + 8 /* rfc1042/bridge tunnel */
+                                       - ETH_HLEN /* ethernet hard_header_len */
+                                       + IEEE80211_ENCRYPT_HEADROOM;
+               ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+               ret = dev_alloc_name(ndev, ndev->name);
+               if (ret < 0) {
+                       free_netdev(ndev);
+                       return ret;
+               }
+
+               ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
+               memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+               SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+
+               /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
+               sdata = netdev_priv(ndev);
+               ndev->ieee80211_ptr = &sdata->wdev;
+               memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+               memcpy(sdata->name, ndev->name, IFNAMSIZ);
+
+               sdata->dev = ndev;
+       }
 
        /* initialise type-independent data */
        sdata->wdev.wiphy = local->hw.wiphy;
        sdata->local = local;
-       sdata->dev = ndev;
 #ifdef CONFIG_INET
        sdata->arp_filter_state = true;
 #endif
@@ -1469,17 +1526,21 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        /* setup type-dependent data */
        ieee80211_setup_sdata(sdata, type);
 
-       if (params) {
-               ndev->ieee80211_ptr->use_4addr = params->use_4addr;
-               if (type == NL80211_IFTYPE_STATION)
-                       sdata->u.mgd.use_4addr = params->use_4addr;
-       }
+       if (ndev) {
+               if (params) {
+                       ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+                       if (type == NL80211_IFTYPE_STATION)
+                               sdata->u.mgd.use_4addr = params->use_4addr;
+               }
 
-       ndev->features |= local->hw.netdev_features;
+               ndev->features |= local->hw.netdev_features;
 
-       ret = register_netdevice(ndev);
-       if (ret)
-               goto fail;
+               ret = register_netdevice(ndev);
+               if (ret) {
+                       free_netdev(ndev);
+                       return ret;
+               }
+       }
 
        mutex_lock(&local->iflist_mtx);
        list_add_tail_rcu(&sdata->list, &local->interfaces);
@@ -1489,10 +1550,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                *new_wdev = &sdata->wdev;
 
        return 0;
-
- fail:
-       free_netdev(ndev);
-       return ret;
 }
 
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1560,22 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
        list_del_rcu(&sdata->list);
        mutex_unlock(&sdata->local->iflist_mtx);
 
-       /* clean up type-dependent data */
-       ieee80211_clean_sdata(sdata);
-
        synchronize_rcu();
-       unregister_netdevice(sdata->dev);
+
+       if (sdata->dev) {
+               unregister_netdevice(sdata->dev);
+       } else {
+               cfg80211_unregister_wdev(&sdata->wdev);
+               kfree(sdata);
+       }
+}
+
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
+{
+       if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
+               return;
+       ieee80211_do_stop(sdata, true);
+       ieee80211_teardown_sdata(sdata);
 }
 
 /*
@@ -1518,6 +1586,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata, *tmp;
        LIST_HEAD(unreg_list);
+       LIST_HEAD(wdev_list);
 
        ASSERT_RTNL();
 
@@ -1525,13 +1594,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
                list_del(&sdata->list);
 
-               ieee80211_clean_sdata(sdata);
-
-               unregister_netdevice_queue(sdata->dev, &unreg_list);
+               if (sdata->dev)
+                       unregister_netdevice_queue(sdata->dev, &unreg_list);
+               else
+                       list_add(&sdata->list, &wdev_list);
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
        list_del(&unreg_list);
+
+       list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+               list_del(&sdata->list);
+               cfg80211_unregister_wdev(&sdata->wdev);
+               kfree(sdata);
+       }
 }
 
 static int netdev_notify(struct notifier_block *nb,
index 7ae678ba5d679dbd40fc7c199c499ed82228757f..d27e61aaa71bd7200c527606820f5d5d82c39ff0 100644 (file)
@@ -402,7 +402,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
         * Synchronize so the TX path can no longer be using
         * this key before we free/remove it.
         */
-       synchronize_rcu();
+       synchronize_net();
 
        if (key->local)
                ieee80211_key_disable_hw_accel(key);
index c26e231c733af85d66fba73821ee877ca4338912..c80c4490351ce54fb75c41cdb9be1a313b0ffef5 100644 (file)
@@ -150,13 +150,11 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 
        if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
            test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
-           test_bit(SCAN_HW_SCANNING, &local->scanning))
+           test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+           !local->ap_power_level)
                power = chan->max_power;
        else
-               power = local->power_constr_level ?
-                       min(chan->max_power,
-                               (chan->max_reg_power  - local->power_constr_level)) :
-                       chan->max_power;
+               power = min(chan->max_power, local->ap_power_level);
 
        if (local->user_power_level >= 0)
                power = min(power, local->user_power_level);
@@ -207,6 +205,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
                sdata->vif.bss_conf.bssid = NULL;
        else if (ieee80211_vif_is_mesh(&sdata->vif)) {
                sdata->vif.bss_conf.bssid = zero;
+       } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+               sdata->vif.bss_conf.bssid = sdata->vif.addr;
+               WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
+                         "P2P Device BSS changed %#x", changed);
        } else {
                WARN_ON(1);
                return;
@@ -362,9 +364,7 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, recalc_smps);
 
-       mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_smps(local);
-       mutex_unlock(&local->iflist_mtx);
 }
 
 #ifdef CONFIG_INET
@@ -514,6 +514,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
                        BIT(IEEE80211_STYPE_AUTH >> 4) |
                        BIT(IEEE80211_STYPE_DEAUTH >> 4),
        },
+       [NL80211_IFTYPE_P2P_DEVICE] = {
+               .tx = 0xffff,
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                       BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
 };
 
 static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +541,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        int priv_size, i;
        struct wiphy *wiphy;
 
+       if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
+                   !ops->add_interface || !ops->remove_interface ||
+                   !ops->configure_filter))
+               return NULL;
+
        if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
                return NULL;
 
@@ -588,13 +598,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-       BUG_ON(!ops->tx);
-       BUG_ON(!ops->start);
-       BUG_ON(!ops->stop);
-       BUG_ON(!ops->config);
-       BUG_ON(!ops->add_interface);
-       BUG_ON(!ops->remove_interface);
-       BUG_ON(!ops->configure_filter);
        local->ops = ops;
 
        /* set up some defaults */
index 85572353a7e37d59b64ced140d75f7a9b8d3fb81..ff0296c7bab8b131f07a9846b608e7b91944a93e 100644 (file)
@@ -109,11 +109,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 
        /* Disallow HT40+/- mismatch */
        if (ie->ht_operation &&
-           (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
-           local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+           (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
+            sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
            (sta_channel_type == NL80211_CHAN_HT40MINUS ||
             sta_channel_type == NL80211_CHAN_HT40PLUS) &&
-           local->_oper_channel_type != sta_channel_type)
+           sdata->vif.bss_conf.channel_type != sta_channel_type)
                goto mismatch;
 
        return true;
@@ -136,10 +136,13 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
  * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
  *
  * @sdata: mesh interface in which mesh beacons are going to be updated
+ *
+ * Returns: beacon changed flag if the beacon content changed.
  */
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 {
        bool free_plinks;
+       u32 changed = 0;
 
        /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
         * the mesh interface might be able to establish plinks with peers that
@@ -149,8 +152,12 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
         */
        free_plinks = mesh_plink_availables(sdata);
 
-       if (free_plinks != sdata->u.mesh.accepting_plinks)
-               ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
+       if (free_plinks != sdata->u.mesh.accepting_plinks) {
+               sdata->u.mesh.accepting_plinks = free_plinks;
+               changed = BSS_CHANGED_BEACON;
+       }
+
+       return changed;
 }
 
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -262,7 +269,6 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
        neighbors = (neighbors > 15) ? 15 : neighbors;
        *pos++ = neighbors << 1;
        /* Mesh capability */
-       ifmsh->accepting_plinks = mesh_plink_availables(sdata);
        *pos = MESHCONF_CAPAB_FORWARDING;
        *pos |= ifmsh->accepting_plinks ?
            MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
@@ -349,17 +355,18 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *chan = local->oper_channel;
        u8 *pos;
 
        if (skb_tailroom(skb) < 3)
                return -ENOMEM;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[chan->band];
        if (sband->band == IEEE80211_BAND_2GHZ) {
                pos = skb_put(skb, 2 + 1);
                *pos++ = WLAN_EID_DS_PARAMS;
                *pos++ = 1;
-               *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+               *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
        }
 
        return 0;
@@ -374,7 +381,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
 
        sband = local->hw.wiphy->bands[local->oper_channel->band];
        if (!sband->ht_cap.ht_supported ||
-           local->_oper_channel_type == NL80211_CHAN_NO_HT)
+           sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
                return 0;
 
        if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -391,7 +398,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_channel *channel = local->oper_channel;
-       enum nl80211_channel_type channel_type = local->_oper_channel_type;
+       enum nl80211_channel_type channel_type =
+                               sdata->vif.bss_conf.channel_type;
        struct ieee80211_supported_band *sband =
                                local->hw.wiphy->bands[channel->band];
        struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -521,14 +529,13 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
 static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
                           struct ieee80211_if_mesh *ifmsh)
 {
-       bool free_plinks;
+       u32 changed;
 
        ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
        mesh_path_expire(sdata);
 
-       free_plinks = mesh_plink_availables(sdata);
-       if (free_plinks != sdata->u.mesh.accepting_plinks)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       changed = mesh_accept_plinks_update(sdata);
+       ieee80211_bss_info_change_notify(sdata, changed);
 
        mod_timer(&ifmsh->housekeeping_timer,
                  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -603,12 +610,14 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(sdata->local,
-                                         sdata->local->hw.conf.channel->band);
+                                         sdata->local->oper_channel->band);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
                                                BSS_CHANGED_BEACON_ENABLED |
                                                BSS_CHANGED_HT |
                                                BSS_CHANGED_BASIC_RATES |
                                                BSS_CHANGED_BEACON_INT);
+
+       netif_carrier_on(sdata->dev);
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -616,9 +625,15 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
+       netif_carrier_off(sdata->dev);
+
+       /* stop the beacon */
        ifmsh->mesh_id_len = 0;
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
-       sta_info_flush(local, NULL);
+
+       /* flush STAs and mpaths on this iface */
+       sta_info_flush(sdata->local, sdata);
+       mesh_path_flush_by_iface(sdata);
 
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
index faaa39bcfd109b783c2f297fdda6aa3585587026..25d0f17dec71e74c8c9e514f17155628ce6dfff6 100644 (file)
@@ -215,6 +215,9 @@ struct mesh_rmc {
 /* Maximum number of paths per interface */
 #define MESH_MAX_MPATHS                1024
 
+/* Number of frames buffered per destination for unresolved destinations */
+#define MESH_FRAME_QUEUE_LEN   10
+
 /* Public interfaces */
 /* Various */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -282,7 +285,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
                           u8 *hw_addr,
                           struct ieee802_11_elems *ie);
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
 void mesh_plink_deactivate(struct sta_info *sta);
 int mesh_plink_open(struct sta_info *sta);
index 494bc39f61a4cb67fce07ad649e9be814fcb9d6b..47aeee2d8db160f6fa9eb62c32131bbd686acc29 100644 (file)
@@ -17,8 +17,6 @@
 #define MAX_METRIC     0xffffffff
 #define ARITH_SHIFT    8
 
-/* Number of frames buffered per destination for unresolved destinations */
-#define MESH_FRAME_QUEUE_LEN   10
 #define MAX_PREQ_QUEUE_LEN     64
 
 /* Destination only */
index 075bc535c60126c33c6ce5f015bab87b00fe08f2..aa749818860e72f1cb279f3cf20c437a5b0ae6d0 100644 (file)
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 {
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
-       struct sk_buff_head tmpq;
        unsigned long flags;
 
        rcu_assign_pointer(mpath->next_hop, sta);
 
-       __skb_queue_head_init(&tmpq);
-
        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
-       while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+       skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
-               __skb_queue_tail(&tmpq, skb);
        }
 
-       skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
 {
-       struct sk_buff *skb, *cp_skb = NULL;
-       struct sk_buff_head gateq, failq;
+       struct sk_buff *skb, *fskb, *tmp;
+       struct sk_buff_head failq;
        unsigned long flags;
-       int num_skbs;
 
        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);
 
-       __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);
 
        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 
-       num_skbs = skb_queue_len(&failq);
-
-       while (num_skbs--) {
-               skb = __skb_dequeue(&failq);
-               if (copy) {
-                       cp_skb = skb_copy(skb, GFP_ATOMIC);
-                       if (cp_skb)
-                               __skb_queue_tail(&failq, cp_skb);
+       skb_queue_walk_safe(&failq, fskb, tmp) {
+               if (skb_queue_len(&gate_mpath->frame_queue) >=
+                                 MESH_FRAME_QUEUE_LEN) {
+                       mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+                       break;
                }
 
+               skb = skb_copy(fskb, GFP_ATOMIC);
+               if (WARN_ON(!skb))
+                       break;
+
                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
-               __skb_queue_tail(&gateq, skb);
+               skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+               if (copy)
+                       continue;
+
+               __skb_unlink(fskb, &failq);
+               kfree_skb(fskb);
        }
 
-       spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
-       skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
-       spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
 
        if (!copy)
                return;
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
-       memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
index af671b984df37123cf6841fb9a8e4e5db70bde65..3ab34d81689753e0beaf7af4a9c7c806f9da6915 100644 (file)
@@ -48,17 +48,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                u8 *da, __le16 llid, __le16 plid, __le16 reason);
 
 static inline
-void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
-       mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata);
 }
 
 static inline
-void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
-       mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata);
 }
 
 /**
@@ -117,7 +117,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        u16 ht_opmode;
        bool non_ht_sta = false, ht20_sta = false;
 
-       if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+       if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
                return 0;
 
        rcu_read_lock();
@@ -147,7 +147,8 @@ out:
 
        if (non_ht_sta)
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
-       else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+       else if (ht20_sta &&
+                sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
        else
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -170,22 +171,21 @@ out:
  * @sta: mesh peer link to deactivate
  *
  * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
  *
  * Locking: the caller must hold sta->lock
  */
-static bool __mesh_plink_deactivate(struct sta_info *sta)
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated = false;
+       u32 changed = 0;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB) {
-               mesh_plink_dec_estab_count(sdata);
-               deactivated = true;
-       }
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               changed = mesh_plink_dec_estab_count(sdata);
        sta->plink_state = NL80211_PLINK_BLOCKED;
        mesh_path_flush_by_nexthop(sta);
 
-       return deactivated;
+       return changed;
 }
 
 /**
@@ -198,18 +198,17 @@ static bool __mesh_plink_deactivate(struct sta_info *sta)
 void mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated;
+       u32 changed;
 
        spin_lock_bh(&sta->lock);
-       deactivated = __mesh_plink_deactivate(sta);
+       changed = __mesh_plink_deactivate(sta);
        sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                            sta->sta.addr, sta->llid, sta->plid,
                            sta->reason);
        spin_unlock_bh(&sta->lock);
 
-       if (deactivated)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -217,12 +216,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                u8 *da, __le16 llid, __le16 plid, __le16 reason) {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
        struct ieee80211_mgmt *mgmt;
        bool include_plid = false;
        u16 peering_proto = 0;
        u8 *pos, ie_len = 4;
        int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
                      sizeof(mgmt->u.action.u.self_prot);
+       int err = -ENOMEM;
 
        skb = dev_alloc_skb(local->tx_headroom +
                            hdr_len +
@@ -238,6 +239,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                            sdata->u.mesh.ie_len);
        if (!skb)
                return -1;
+       info = IEEE80211_SKB_CB(skb);
        skb_reserve(skb, local->tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
@@ -258,15 +260,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                        pos = skb_put(skb, 2);
                        memcpy(pos + 2, &plid, 2);
                }
-               if (ieee80211_add_srates_ie(sdata, skb, true) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+               if (ieee80211_add_srates_ie(sdata, skb, true,
+                                           local->oper_channel->band) ||
+                   ieee80211_add_ext_srates_ie(sdata, skb, true,
+                                               local->oper_channel->band) ||
                    mesh_add_rsn_ie(skb, sdata) ||
                    mesh_add_meshid_ie(skb, sdata) ||
                    mesh_add_meshconf_ie(skb, sdata))
-                       return -1;
+                       goto free;
        } else {        /* WLAN_SP_MESH_PEERING_CLOSE */
+               info->flags |= IEEE80211_TX_CTL_NO_ACK;
                if (mesh_add_meshid_ie(skb, sdata))
-                       return -1;
+                       goto free;
        }
 
        /* Add Mesh Peering Management element */
@@ -285,11 +290,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                ie_len += 2;    /* reason code */
                break;
        default:
-               return -EINVAL;
+               err = -EINVAL;
+               goto free;
        }
 
        if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
-               return -ENOMEM;
+               goto free;
 
        pos = skb_put(skb, 2 + ie_len);
        *pos++ = WLAN_EID_PEER_MGMT;
@@ -310,14 +316,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
        if (action != WLAN_SP_MESH_PEERING_CLOSE) {
                if (mesh_add_ht_cap_ie(skb, sdata) ||
                    mesh_add_ht_oper_ie(skb, sdata))
-                       return -1;
+                       goto free;
        }
 
        if (mesh_add_vendor_ies(skb, sdata))
-               return -1;
+               goto free;
 
        ieee80211_tx_skb(sdata, skb);
        return 0;
+free:
+       kfree_skb(skb);
+       return err;
 }
 
 /**
@@ -362,9 +371,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_bh(&sta->lock);
        sta->last_rx = jiffies;
+       if (sta->plink_state == NL80211_PLINK_ESTAB) {
+               spin_unlock_bh(&sta->lock);
+               return sta;
+       }
+
        sta->sta.supp_rates[band] = rates;
        if (elems->ht_cap_elem &&
-           sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+           sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                                  elems->ht_cap_elem,
                                                  &sta->sta.ht_cap);
@@ -523,7 +537,8 @@ int mesh_plink_open(struct sta_info *sta)
        spin_lock_bh(&sta->lock);
        get_random_bytes(&llid, 2);
        sta->llid = llid;
-       if (sta->plink_state != NL80211_PLINK_LISTEN) {
+       if (sta->plink_state != NL80211_PLINK_LISTEN &&
+           sta->plink_state != NL80211_PLINK_BLOCKED) {
                spin_unlock_bh(&sta->lock);
                return -EBUSY;
        }
@@ -541,15 +556,14 @@ int mesh_plink_open(struct sta_info *sta)
 void mesh_plink_block(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated;
+       u32 changed;
 
        spin_lock_bh(&sta->lock);
-       deactivated = __mesh_plink_deactivate(sta);
+       changed = __mesh_plink_deactivate(sta);
        sta->plink_state = NL80211_PLINK_BLOCKED;
        spin_unlock_bh(&sta->lock);
 
-       if (deactivated)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 
@@ -852,9 +866,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        del_timer(&sta->plink_timer);
                        sta->plink_state = NL80211_PLINK_ESTAB;
                        spin_unlock_bh(&sta->lock);
-                       mesh_plink_inc_estab_count(sdata);
+                       changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        break;
@@ -888,9 +901,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        del_timer(&sta->plink_timer);
                        sta->plink_state = NL80211_PLINK_ESTAB;
                        spin_unlock_bh(&sta->lock);
-                       mesh_plink_inc_estab_count(sdata);
+                       changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        mesh_plink_frame_tx(sdata,
@@ -908,13 +920,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                case CLS_ACPT:
                        reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
                        sta->reason = reason;
-                       __mesh_plink_deactivate(sta);
+                       changed |= __mesh_plink_deactivate(sta);
                        sta->plink_state = NL80211_PLINK_HOLDING;
                        llid = sta->llid;
                        mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
                        spin_unlock_bh(&sta->lock);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                                            sta->sta.addr, llid, plid, reason);
                        break;
index f76b83341cf9a39db0e14092a85f2e576245a306..e714ed8bb198727c6738c1e3ae7650243dd2a07e 100644 (file)
@@ -88,8 +88,6 @@ MODULE_PARM_DESC(probe_wait_ms,
 #define TMR_RUNNING_TIMER      0
 #define TMR_RUNNING_CHANSW     1
 
-#define DEAUTH_DISASSOC_LEN    (24 /* hdr */ + 2 /* reason */)
-
 /*
  * All cfg80211 functions have to be called outside a locked
  * section so that they can acquire a lock themselves... This
@@ -146,6 +144,9 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
        if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
                return;
 
+       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+               return;
+
        mod_timer(&sdata->u.mgd.bcn_mon_timer,
                  round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
 }
@@ -182,15 +183,15 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
        u16 ht_opmode;
        bool disable_40 = false;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
 
        switch (sdata->vif.bss_conf.channel_type) {
        case NL80211_CHAN_HT40PLUS:
-               if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+               if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
                        disable_40 = true;
                break;
        case NL80211_CHAN_HT40MINUS:
-               if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+               if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
                        disable_40 = true;
                break;
        default:
@@ -326,6 +327,26 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+                                struct sk_buff *skb,
+                                struct ieee80211_supported_band *sband)
+{
+       u8 *pos;
+       u32 cap;
+       struct ieee80211_sta_vht_cap vht_cap;
+
+       BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
+
+       memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+
+       /* determine capability flags */
+       cap = vht_cap.cap;
+
+       /* reserve and fill IE */
+       pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
+       ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
+}
+
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -371,6 +392,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                        4 + /* power capability */
                        2 + 2 * sband->n_channels + /* supported channels */
                        2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+                       2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
                        assoc_data->ie_len + /* extra IEs */
                        9, /* WMM */
                        GFP_KERNEL);
@@ -503,6 +525,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
                                    sband, local->oper_channel, ifmgd->ap_smps);
 
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+               ieee80211_add_vht_ie(sdata, skb, sband);
+
        /* if present, add any custom non-vendor IEs that go after HT */
        if (assoc_data->ie_len && assoc_data->ie) {
                noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -547,48 +572,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        ieee80211_tx_skb(sdata, skb);
 }
 
-static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
-                                          const u8 *bssid, u16 stype,
-                                          u16 reason, bool send_frame,
-                                          u8 *frame_buf)
-{
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct sk_buff *skb;
-       struct ieee80211_mgmt *mgmt = (void *)frame_buf;
-
-       /* build frame */
-       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
-       mgmt->duration = 0; /* initialize only */
-       mgmt->seq_ctrl = 0; /* initialize only */
-       memcpy(mgmt->da, bssid, ETH_ALEN);
-       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
-       memcpy(mgmt->bssid, bssid, ETH_ALEN);
-       /* u.deauth.reason_code == u.disassoc.reason_code */
-       mgmt->u.deauth.reason_code = cpu_to_le16(reason);
-
-       if (send_frame) {
-               skb = dev_alloc_skb(local->hw.extra_tx_headroom +
-                                   DEAUTH_DISASSOC_LEN);
-               if (!skb)
-                       return;
-
-               skb_reserve(skb, local->hw.extra_tx_headroom);
-
-               /* copy in frame */
-               memcpy(skb_put(skb, DEAUTH_DISASSOC_LEN),
-                      mgmt, DEAUTH_DISASSOC_LEN);
-
-               if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
-                       IEEE80211_SKB_CB(skb)->flags |=
-                               IEEE80211_TX_INTFL_DONT_ENCRYPT;
-
-               drv_mgd_prepare_tx(local, sdata);
-
-               ieee80211_tx_skb(sdata, skb);
-       }
-}
-
 void ieee80211_send_pspoll(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata)
 {
@@ -687,6 +670,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        /* XXX: shouldn't really modify cfg80211-owned data! */
        ifmgd->associated->channel = sdata->local->oper_channel;
 
+       /* XXX: wait for a beacon first? */
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
  out:
@@ -704,16 +688,13 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
 
        trace_api_chswitch_done(sdata, success);
        if (!success) {
-               /*
-                * If the channel switch was not successful, stay
-                * around on the old channel. We currently lack
-                * good handling of this situation, possibly we
-                * should just drop the association.
-                */
-               sdata->local->csa_channel = sdata->local->oper_channel;
+               sdata_info(sdata,
+                          "driver channel switch failed, disconnecting\n");
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &ifmgd->csa_connection_drop_work);
+       } else {
+               ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
        }
-
-       ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
 }
 EXPORT_SYMBOL(ieee80211_chswitch_done);
 
@@ -758,61 +739,111 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                return;
 
        new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
-       if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
+       if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) {
+               sdata_info(sdata,
+                          "AP %pM switches to unsupported channel (%d MHz), disconnecting\n",
+                          ifmgd->associated->bssid, new_freq);
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &ifmgd->csa_connection_drop_work);
                return;
+       }
 
        sdata->local->csa_channel = new_ch;
 
+       ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+
+       if (sw_elem->mode)
+               ieee80211_stop_queues_by_reason(&sdata->local->hw,
+                               IEEE80211_QUEUE_STOP_REASON_CSA);
+
        if (sdata->local->ops->channel_switch) {
                /* use driver's channel switch callback */
-               struct ieee80211_channel_switch ch_switch;
-               memset(&ch_switch, 0, sizeof(ch_switch));
-               ch_switch.timestamp = timestamp;
-               if (sw_elem->mode) {
-                       ch_switch.block_tx = true;
-                       ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-               }
-               ch_switch.channel = new_ch;
-               ch_switch.count = sw_elem->count;
-               ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+               struct ieee80211_channel_switch ch_switch = {
+                       .timestamp = timestamp,
+                       .block_tx = sw_elem->mode,
+                       .channel = new_ch,
+                       .count = sw_elem->count,
+               };
+
                drv_channel_switch(sdata->local, &ch_switch);
                return;
        }
 
        /* channel switch handled in software */
-       if (sw_elem->count <= 1) {
+       if (sw_elem->count <= 1)
                ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
-       } else {
-               if (sw_elem->mode)
-                       ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-               ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+       else
                mod_timer(&ifmgd->chswitch_timer,
-                         jiffies +
-                         msecs_to_jiffies(sw_elem->count *
-                                          cbss->beacon_interval));
-       }
+                         TU_TO_EXP_TIME(sw_elem->count *
+                                        cbss->beacon_interval));
 }
 
 static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
-                                       u16 capab_info, u8 *pwr_constr_elem,
-                                       u8 pwr_constr_elem_len)
+                                       struct ieee80211_channel *channel,
+                                       const u8 *country_ie, u8 country_ie_len,
+                                       const u8 *pwr_constr_elem)
 {
-       struct ieee80211_conf *conf = &sdata->local->hw.conf;
+       struct ieee80211_country_ie_triplet *triplet;
+       int chan = ieee80211_frequency_to_channel(channel->center_freq);
+       int i, chan_pwr, chan_increment, new_ap_level;
+       bool have_chan_pwr = false;
 
-       if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
+       /* Invalid IE */
+       if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
                return;
 
-       /* Power constraint IE length should be 1 octet */
-       if (pwr_constr_elem_len != 1)
-               return;
+       triplet = (void *)(country_ie + 3);
+       country_ie_len -= 3;
+
+       switch (channel->band) {
+       default:
+               WARN_ON_ONCE(1);
+               /* fall through */
+       case IEEE80211_BAND_2GHZ:
+       case IEEE80211_BAND_60GHZ:
+               chan_increment = 1;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               chan_increment = 4;
+               break;
+       }
+
+       /* find channel */
+       while (country_ie_len >= 3) {
+               u8 first_channel = triplet->chans.first_channel;
 
-       if ((*pwr_constr_elem <= conf->channel->max_reg_power) &&
-           (*pwr_constr_elem != sdata->local->power_constr_level)) {
-               sdata->local->power_constr_level = *pwr_constr_elem;
-               ieee80211_hw_config(sdata->local, 0);
+               if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID)
+                       goto next;
+
+               for (i = 0; i < triplet->chans.num_channels; i++) {
+                       if (first_channel + i * chan_increment == chan) {
+                               have_chan_pwr = true;
+                               chan_pwr = triplet->chans.max_power;
+                               break;
+                       }
+               }
+               if (have_chan_pwr)
+                       break;
+
+ next:
+               triplet++;
+               country_ie_len -= 3;
        }
+
+       if (!have_chan_pwr)
+               return;
+
+       new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem);
+
+       if (sdata->local->ap_power_level == new_ap_level)
+               return;
+
+       sdata_info(sdata,
+                  "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
+                  new_ap_level, chan_pwr, *pwr_constr_elem,
+                  sdata->u.mgd.bssid);
+       sdata->local->ap_power_level = new_ap_level;
+       ieee80211_hw_config(sdata->local, 0);
 }
 
 void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
@@ -1007,6 +1038,16 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
        ieee80211_change_ps(local);
 }
 
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
+{
+       bool ps_allowed = ieee80211_powersave_allowed(sdata);
+
+       if (sdata->vif.bss_conf.ps != ps_allowed) {
+               sdata->vif.bss_conf.ps = ps_allowed;
+               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
+       }
+}
+
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
@@ -1239,7 +1280,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
        }
 
        use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-       if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
+       if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
                use_short_slot = true;
 
        if (use_protection != bss_conf->use_cts_prot) {
@@ -1307,9 +1348,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 
        mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_ps(local, -1);
-       ieee80211_recalc_smps(local);
        mutex_unlock(&local->iflist_mtx);
 
+       ieee80211_recalc_smps(local);
+       ieee80211_recalc_ps_vif(sdata);
+
        netif_tx_start_all_queues(sdata->dev);
        netif_carrier_on(sdata->dev);
 }
@@ -1356,7 +1399,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
-               ieee80211_sta_tear_down_BA_sessions(sta, tx);
+               ieee80211_sta_tear_down_BA_sessions(sta, false);
        }
        mutex_unlock(&local->sta_mtx);
 
@@ -1371,6 +1414,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        }
        local->ps_sdata = NULL;
 
+       /* disable per-vif ps */
+       ieee80211_recalc_ps_vif(sdata);
+
        /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
        if (tx)
                drv_flush(local, false);
@@ -1401,7 +1447,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 
-       local->power_constr_level = 0;
+       local->ap_power_level = 0;
 
        del_timer_sync(&local->dynamic_ps_timer);
        cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -1542,7 +1588,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                        ssid_len = ssid[1];
 
                ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
-                                        0, (u32) -1, true, false);
+                                        0, (u32) -1, true, false,
+                                        ifmgd->associated->channel);
        }
 
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1645,19 +1692,21 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
                ssid_len = ssid[1];
 
        skb = ieee80211_build_probe_req(sdata, cbss->bssid,
-                                       (u32) -1, ssid + 2, ssid_len,
+                                       (u32) -1,
+                                       sdata->local->oper_channel,
+                                       ssid + 2, ssid_len,
                                        NULL, 0, true);
 
        return skb;
 }
 EXPORT_SYMBOL(ieee80211_ap_probereq_get);
 
-static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
+static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
+                                  bool transmit_frame)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
-       u8 bssid[ETH_ALEN];
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
        if (!ifmgd->associated) {
@@ -1665,27 +1714,24 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
-       sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
-
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              false, frame_buf);
+                              transmit_frame, frame_buf);
+       ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
        mutex_unlock(&ifmgd->mtx);
 
        /*
         * must be outside lock due to cfg80211,
         * but that's not a problem.
         */
-       cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&local->mtx);
        ieee80211_recalc_idle(local);
        mutex_unlock(&local->mtx);
 }
 
-void ieee80211_beacon_connection_loss_work(struct work_struct *work)
+static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
 {
        struct ieee80211_sub_if_data *sdata =
                container_of(work, struct ieee80211_sub_if_data,
@@ -1701,10 +1747,24 @@ void ieee80211_beacon_connection_loss_work(struct work_struct *work)
                rcu_read_unlock();
        }
 
-       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
-               __ieee80211_connection_loss(sdata);
-       else
+       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) {
+               sdata_info(sdata, "Connection to AP %pM lost\n",
+                          ifmgd->bssid);
+               __ieee80211_disconnect(sdata, false);
+       } else {
                ieee80211_mgd_probe_ap(sdata, true);
+       }
+}
+
+static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+{
+       struct ieee80211_sub_if_data *sdata =
+               container_of(work, struct ieee80211_sub_if_data,
+                            u.mgd.csa_connection_drop_work);
+
+       ieee80211_wake_queues_by_reason(&sdata->local->hw,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       __ieee80211_disconnect(sdata, true);
 }
 
 void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -2232,14 +2292,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                mutex_unlock(&local->iflist_mtx);
        }
 
-       if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
-           (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
-                                                       ETH_ALEN) == 0)) {
-               struct ieee80211_channel_sw_ie *sw_elem =
-                       (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
-               ieee80211_sta_process_chanswitch(sdata, sw_elem,
+       if (elems->ch_switch_ie &&
+           memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
+               ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
                                                 bss, rx_status->mactime);
-       }
 }
 
 
@@ -2326,7 +2382,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        if (baselen > len)
                return;
 
-       if (rx_status->freq != local->hw.conf.channel->center_freq)
+       if (rx_status->freq != local->oper_channel->center_freq)
                return;
 
        if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2490,21 +2546,19 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
            !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
                struct ieee80211_supported_band *sband;
 
-               sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+               sband = local->hw.wiphy->bands[local->oper_channel->band];
 
                changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
                                                  bssid, true);
        }
 
-       /* Note: country IE parsing is done for us by cfg80211 */
-       if (elems.country_elem) {
-               /* TODO: IBSS also needs this */
-               if (elems.pwr_constr_elem)
-                       ieee80211_handle_pwr_constr(sdata,
-                               le16_to_cpu(mgmt->u.probe_resp.capab_info),
-                               elems.pwr_constr_elem,
-                               elems.pwr_constr_elem_len);
-       }
+       if (elems.country_elem && elems.pwr_constr_elem &&
+           mgmt->u.probe_resp.capab_info &
+                               cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT))
+               ieee80211_handle_pwr_constr(sdata, local->oper_channel,
+                                           elems.country_elem,
+                                           elems.country_elem_len,
+                                           elems.pwr_constr_elem);
 
        ieee80211_bss_info_change_notify(sdata, changed);
 }
@@ -2601,7 +2655,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               false, frame_buf);
@@ -2611,7 +2665,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
         * must be outside lock due to cfg80211,
         * but that's not a problem.
         */
-       cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&local->mtx);
        ieee80211_recalc_idle(local);
@@ -2673,7 +2727,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
                 * will not answer to direct packet in unassociated state.
                 */
                ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
-                                        NULL, 0, (u32) -1, true, false);
+                                        NULL, 0, (u32) -1, true, false,
+                                        auth_data->bss->channel);
        }
 
        auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -2894,6 +2949,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 
        cancel_work_sync(&ifmgd->monitor_work);
        cancel_work_sync(&ifmgd->beacon_connection_loss_work);
+       cancel_work_sync(&ifmgd->csa_connection_drop_work);
        if (del_timer_sync(&ifmgd->timer))
                set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 
@@ -2950,6 +3006,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
        INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
        INIT_WORK(&ifmgd->beacon_connection_loss_work,
                  ieee80211_beacon_connection_loss_work);
+       INIT_WORK(&ifmgd->csa_connection_drop_work,
+                 ieee80211_csa_connection_drop_work);
        INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
        setup_timer(&ifmgd->timer, ieee80211_sta_timer,
                    (unsigned long) sdata);
@@ -3000,41 +3058,17 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
        return 0;
 }
 
-static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
-                                    struct cfg80211_bss *cbss, bool assoc)
+static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+                                 struct cfg80211_bss *cbss)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct ieee80211_bss *bss = (void *)cbss->priv;
-       struct sta_info *sta = NULL;
-       bool have_sta = false;
-       int err;
        int ht_cfreq;
        enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
        const u8 *ht_oper_ie;
        const struct ieee80211_ht_operation *ht_oper = NULL;
        struct ieee80211_supported_band *sband;
 
-       if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
-               return -EINVAL;
-
-       if (assoc) {
-               rcu_read_lock();
-               have_sta = sta_info_get(sdata, cbss->bssid);
-               rcu_read_unlock();
-       }
-
-       if (!have_sta) {
-               sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
-               if (!sta)
-                       return -ENOMEM;
-       }
-
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&local->mtx);
-
-       /* switch to the right channel */
        sband = local->hw.wiphy->bands[cbss->channel->band];
 
        ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3097,10 +3131,51 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        local->oper_channel = cbss->channel;
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-       if (sta) {
+       return 0;
+}
+
+static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+                                    struct cfg80211_bss *cbss, bool assoc)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       struct ieee80211_bss *bss = (void *)cbss->priv;
+       struct sta_info *new_sta = NULL;
+       bool have_sta = false;
+       int err;
+
+       if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
+               return -EINVAL;
+
+       if (assoc) {
+               rcu_read_lock();
+               have_sta = sta_info_get(sdata, cbss->bssid);
+               rcu_read_unlock();
+       }
+
+       if (!have_sta) {
+               new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
+               if (!new_sta)
+                       return -ENOMEM;
+       }
+
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(sdata->local);
+       mutex_unlock(&local->mtx);
+
+       if (new_sta) {
                u32 rates = 0, basic_rates = 0;
                bool have_higher_than_11mbit;
                int min_rate = INT_MAX, min_rate_index = -1;
+               struct ieee80211_supported_band *sband;
+
+               sband = local->hw.wiphy->bands[cbss->channel->band];
+
+               err = ieee80211_prep_channel(sdata, cbss);
+               if (err) {
+                       sta_info_free(local, new_sta);
+                       return err;
+               }
 
                ieee80211_get_rates(sband, bss->supp_rates,
                                    bss->supp_rates_len,
@@ -3122,7 +3197,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        basic_rates = BIT(min_rate_index);
                }
 
-               sta->sta.supp_rates[cbss->channel->band] = rates;
+               new_sta->sta.supp_rates[cbss->channel->band] = rates;
                sdata->vif.bss_conf.basic_rates = basic_rates;
 
                /* cf. IEEE 802.11 9.2.12 */
@@ -3145,10 +3220,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        BSS_CHANGED_BEACON_INT);
 
                if (assoc)
-                       sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+                       sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
 
-               err = sta_info_insert(sta);
-               sta = NULL;
+               err = sta_info_insert(new_sta);
+               new_sta = NULL;
                if (err) {
                        sdata_info(sdata,
                                   "failed to insert STA entry for the AP (error %d)\n",
@@ -3302,9 +3377,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
 
        /* prepare assoc data */
-
-       ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
-       ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+       
+       /*
+        * keep only the 40 MHz disable bit set as it might have
+        * been set during authentication already, all other bits
+        * should be reset for a new connection
+        */
+       ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
 
        ifmgd->beacon_crc_valid = false;
 
@@ -3320,21 +3399,34 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
                        ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                        netdev_info(sdata->dev,
-                                   "disabling HT due to WEP/TKIP use\n");
+                                   "disabling HT/VHT due to WEP/TKIP use\n");
                }
        }
 
-       if (req->flags & ASSOC_REQ_DISABLE_HT)
+       if (req->flags & ASSOC_REQ_DISABLE_HT) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+       }
 
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
            local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
-               netdev_info(sdata->dev,
-                           "disabling HT as WMM/QoS is not supported\n");
+               if (!bss->wmm_used)
+                       netdev_info(sdata->dev,
+                                   "disabling HT as WMM/QoS is not supported by the AP\n");
+       }
+
+       /* disable VHT if we don't support it or the AP doesn't use WMM */
+       if (!sband->vht_cap.vht_supported ||
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+               if (!bss->wmm_used)
+                       netdev_info(sdata->dev,
+                                   "disabling VHT as WMM/QoS is not supported by the AP\n");
        }
 
        memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3456,7 +3548,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                         struct cfg80211_deauth_request *req)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
 
@@ -3471,17 +3563,21 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                   req->bssid, req->reason_code);
 
        if (ifmgd->associated &&
-           ether_addr_equal(ifmgd->associated->bssid, req->bssid))
+           ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       req->reason_code, true, frame_buf);
-       else
+       } else {
+               drv_mgd_prepare_tx(sdata->local, sdata);
                ieee80211_send_deauth_disassoc(sdata, req->bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               req->reason_code, true,
                                               frame_buf);
+       }
+
        mutex_unlock(&ifmgd->mtx);
 
-       __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       __cfg80211_send_deauth(sdata->dev, frame_buf,
+                              IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&sdata->local->mtx);
        ieee80211_recalc_idle(sdata->local);
@@ -3495,7 +3591,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 bssid[ETH_ALEN];
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
 
@@ -3520,7 +3616,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
                               frame_buf);
        mutex_unlock(&ifmgd->mtx);
 
-       __cfg80211_send_disassoc(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       __cfg80211_send_disassoc(sdata->dev, frame_buf,
+                                IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&sdata->local->mtx);
        ieee80211_recalc_idle(sdata->local);
index 635c3250c66894ed090d08d4356b5832a7120247..83608ac167801f1c06fc55dd3ab53370d947cbc7 100644 (file)
@@ -116,6 +116,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
@@ -144,6 +147,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
 
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
@@ -227,8 +233,7 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
                        u32 dur = dep->duration;
                        dep->duration = dur - roc->duration;
                        roc->duration = dur;
-                       list_del(&dep->list);
-                       list_add(&dep->list, &roc->list);
+                       list_move(&dep->list, &roc->list);
                }
        }
  out_unlock:
index 6e4fd32c66171345c399f9eb765809d974068e34..10de668eb9f64b8e6f3f9509f7db9ad666f36af1 100644 (file)
@@ -56,7 +56,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
        if (!ref)
                return;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
 
        ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
        set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
index 0cb4edee6af5a468657d15af4fc7a05b99028088..61c621e9273fe70c26978d42433d58fca60a80b8 100644 (file)
@@ -60,7 +60,9 @@ static inline int should_drop_frame(struct sk_buff *skb,
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-       if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+       if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
+                           RX_FLAG_FAILED_PLCP_CRC |
+                           RX_FLAG_AMPDU_IS_ZEROLEN))
                return 1;
        if (unlikely(skb->len < 16 + present_fcs_len))
                return 1;
@@ -91,10 +93,17 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        if (status->flag & RX_FLAG_HT) /* HT info */
                len += 3;
 
+       if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+               /* padding */
+               while (len & 3)
+                       len++;
+               len += 8;
+       }
+
        return len;
 }
 
-/**
+/*
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
  * add a radiotap header containing all the fields which the hardware provided.
@@ -215,6 +224,37 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                pos++;
                *pos++ = status->rate_idx;
        }
+
+       if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+               u16 flags = 0;
+
+               /* ensure 4 byte alignment */
+               while ((pos - (u8 *)rthdr) & 3)
+                       pos++;
+               rthdr->it_present |=
+                       cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
+               put_unaligned_le32(status->ampdu_reference, pos);
+               pos += 4;
+               if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
+               if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
+               if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+               if (status->flag & RX_FLAG_AMPDU_IS_LAST)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
+               put_unaligned_le16(flags, pos);
+               pos += 2;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+                       *pos++ = status->ampdu_delimiter_crc;
+               else
+                       *pos++ = 0;
+               *pos++ = 0;
+       }
 }
 
 /*
@@ -2268,7 +2308,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
                goto queue;
        case WLAN_CATEGORY_SPECTRUM_MGMT:
-               if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+               if (status->band != IEEE80211_BAND_5GHZ)
                        break;
 
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                if (!bssid) {
                        if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
                                return 0;
-               } else if (!ieee80211_bssid_match(bssid,
-                                       sdata->vif.addr)) {
+               } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
                        /*
                         * Accept public action frames even when the
                         * BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
                        return 0;
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!ieee80211_is_public_action(hdr, skb->len) &&
+                   !ieee80211_is_probe_req(hdr->frame_control) &&
+                   !ieee80211_is_probe_resp(hdr->frame_control) &&
+                   !ieee80211_is_beacon(hdr->frame_control))
+                       return 0;
+               if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
+                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+               break;
        default:
                /* should never get here */
-               WARN_ON(1);
+               WARN_ON_ONCE(1);
                break;
        }
 
index 839dd9737989ec78bbb979c814953b1bdd0187a8..c4cdbde24fd3a70db1141c9460f617710daf98e2 100644 (file)
@@ -407,7 +407,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        enum ieee80211_band band = local->hw.conf.channel->band;
 
        sdata = rcu_dereference_protected(local->scan_sdata,
-                                         lockdep_is_held(&local->mtx));;
+                                         lockdep_is_held(&local->mtx));
 
        for (i = 0; i < local->scan_req->n_ssids; i++)
                ieee80211_send_probe_req(
@@ -416,7 +416,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
                        local->scan_req->ssids[i].ssid_len,
                        local->scan_req->ie, local->scan_req->ie_len,
                        local->scan_req->rates[band], false,
-                       local->scan_req->no_cck);
+                       local->scan_req->no_cck,
+                       local->hw.conf.channel);
 
        /*
         * After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        if (local->ops->hw_scan) {
                __set_bit(SCAN_HW_SCANNING, &local->scanning);
        } else if ((req->n_channels == 1) &&
-                  (req->channels[0]->center_freq ==
-                   local->hw.conf.channel->center_freq)) {
-
-               /* If we are scanning only on the current channel, then
-                * we do not need to stop normal activities
+                  (req->channels[0] == local->oper_channel)) {
+               /*
+                * If we are scanning only on the operating channel
+                * then we do not need to stop normal activities
                 */
                unsigned long next_delay;
 
@@ -917,6 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                                       struct cfg80211_sched_scan_request *req)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sched_scan_ies sched_scan_ies;
        int ret, i;
 
        mutex_lock(&local->mtx);
@@ -935,33 +936,28 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                if (!local->hw.wiphy->bands[i])
                        continue;
 
-               local->sched_scan_ies.ie[i] = kzalloc(2 +
-                                                     IEEE80211_MAX_SSID_LEN +
-                                                     local->scan_ies_len +
-                                                     req->ie_len,
-                                                     GFP_KERNEL);
-               if (!local->sched_scan_ies.ie[i]) {
+               sched_scan_ies.ie[i] = kzalloc(2 + IEEE80211_MAX_SSID_LEN +
+                                              local->scan_ies_len +
+                                              req->ie_len,
+                                              GFP_KERNEL);
+               if (!sched_scan_ies.ie[i]) {
                        ret = -ENOMEM;
                        goto out_free;
                }
 
-               local->sched_scan_ies.len[i] =
-                       ieee80211_build_preq_ies(local,
-                                                local->sched_scan_ies.ie[i],
+               sched_scan_ies.len[i] =
+                       ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
                                                 req->ie, req->ie_len, i,
                                                 (u32) -1, 0);
        }
 
-       ret = drv_sched_scan_start(local, sdata, req,
-                                  &local->sched_scan_ies);
-       if (ret == 0) {
+       ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
+       if (ret == 0)
                rcu_assign_pointer(local->sched_scan_sdata, sdata);
-               goto out;
-       }
 
 out_free:
        while (i > 0)
-               kfree(local->sched_scan_ies.ie[--i]);
+               kfree(sched_scan_ies.ie[--i]);
 out:
        mutex_unlock(&local->mtx);
        return ret;
@@ -970,7 +966,7 @@ out:
 int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       int ret = 0, i;
+       int ret = 0;
 
        mutex_lock(&local->mtx);
 
@@ -979,12 +975,9 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
                goto out;
        }
 
-       if (rcu_access_pointer(local->sched_scan_sdata)) {
-               for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-                       kfree(local->sched_scan_ies.ie[i]);
-
+       if (rcu_access_pointer(local->sched_scan_sdata))
                drv_sched_scan_stop(local, sdata);
-       }
+
 out:
        mutex_unlock(&local->mtx);
 
@@ -1006,7 +999,6 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local,
                             sched_scan_stopped_work);
-       int i;
 
        mutex_lock(&local->mtx);
 
@@ -1015,9 +1007,6 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
                return;
        }
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-               kfree(local->sched_scan_ies.ie[i]);
-
        rcu_assign_pointer(local->sched_scan_sdata, NULL);
 
        mutex_unlock(&local->mtx);
index 06fa75ceb0251e6064d90661bf7b5e61f979b72c..797dd36a220d92ac549067a9cd4d59a7ebfc09a3 100644 (file)
@@ -91,6 +91,70 @@ static int sta_info_hash_del(struct ieee80211_local *local,
        return -ENOENT;
 }
 
+static void free_sta_work(struct work_struct *wk)
+{
+       struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
+       int ac, i;
+       struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+
+       /*
+        * At this point, when being called as call_rcu callback,
+        * neither mac80211 nor the driver can reference this
+        * sta struct any more except by still existing timers
+        * associated with this station that we clean up below.
+        */
+
+       if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+               BUG_ON(!sdata->bss);
+
+               clear_sta_flag(sta, WLAN_STA_PS_STA);
+
+               atomic_dec(&sdata->bss->num_sta_ps);
+               sta_info_recalc_tim(sta);
+       }
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
+               __skb_queue_purge(&sta->ps_tx_buf[ac]);
+               __skb_queue_purge(&sta->tx_filtered[ac]);
+       }
+
+#ifdef CONFIG_MAC80211_MESH
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               mesh_accept_plinks_update(sdata);
+               mesh_plink_deactivate(sta);
+               del_timer_sync(&sta->plink_timer);
+       }
+#endif
+
+       cancel_work_sync(&sta->drv_unblock_wk);
+
+       /*
+        * Destroy aggregation state here. It would be nice to wait for the
+        * driver to finish aggregation stop and then clean up, but for now
+        * drivers have to handle aggregation stop being requested, followed
+        * directly by station destruction.
+        */
+       for (i = 0; i < STA_TID_NUM; i++) {
+               tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
+               if (!tid_tx)
+                       continue;
+               __skb_queue_purge(&tid_tx->pending);
+               kfree(tid_tx);
+       }
+
+       sta_info_free(local, sta);
+}
+
+static void free_sta_rcu(struct rcu_head *h)
+{
+       struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
+
+       ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
+}
+
 /* protected by RCU */
 struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
                              const u8 *addr)
@@ -241,6 +305,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_init(&sta->lock);
        INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+       INIT_WORK(&sta->free_sta_wk, free_sta_work);
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 
@@ -654,8 +719,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
 {
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
-       int ret, i, ac;
-       struct tid_ampdu_tx *tid_tx;
+       int ret, i;
 
        might_sleep();
 
@@ -674,7 +738,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
         * will be sufficient.
         */
        set_sta_flag(sta, WLAN_STA_BLOCK_BA);
-       ieee80211_sta_tear_down_BA_sessions(sta, true);
+       ieee80211_sta_tear_down_BA_sessions(sta, false);
 
        ret = sta_info_hash_del(local, sta);
        if (ret)
@@ -711,65 +775,14 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
                WARN_ON_ONCE(ret != 0);
        }
 
-       /*
-        * At this point, after we wait for an RCU grace period,
-        * neither mac80211 nor the driver can reference this
-        * sta struct any more except by still existing timers
-        * associated with this station that we clean up below.
-        */
-       synchronize_rcu();
-
-       if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
-               BUG_ON(!sdata->bss);
-
-               clear_sta_flag(sta, WLAN_STA_PS_STA);
-
-               atomic_dec(&sdata->bss->num_sta_ps);
-               sta_info_recalc_tim(sta);
-       }
-
-       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
-               __skb_queue_purge(&sta->ps_tx_buf[ac]);
-               __skb_queue_purge(&sta->tx_filtered[ac]);
-       }
-
-#ifdef CONFIG_MAC80211_MESH
-       if (ieee80211_vif_is_mesh(&sdata->vif))
-               mesh_accept_plinks_update(sdata);
-#endif
-
        sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
 
-       cancel_work_sync(&sta->drv_unblock_wk);
-
        cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
 
        rate_control_remove_sta_debugfs(sta);
        ieee80211_sta_debugfs_remove(sta);
 
-#ifdef CONFIG_MAC80211_MESH
-       if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
-               mesh_plink_deactivate(sta);
-               del_timer_sync(&sta->plink_timer);
-       }
-#endif
-
-       /*
-        * Destroy aggregation state here. It would be nice to wait for the
-        * driver to finish aggregation stop and then clean up, but for now
-        * drivers have to handle aggregation stop being requested, followed
-        * directly by station destruction.
-        */
-       for (i = 0; i < STA_TID_NUM; i++) {
-               tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
-               if (!tid_tx)
-                       continue;
-               __skb_queue_purge(&tid_tx->pending);
-               kfree(tid_tx);
-       }
-
-       sta_info_free(local, sta);
+       call_rcu(&sta->rcu_head, free_sta_rcu);
 
        return 0;
 }
index a470e1123a5576ed5e14b779ed4a9213cda407b7..c88f161f81185a678335fe3df6201fb8e64c8d80 100644 (file)
@@ -287,6 +287,7 @@ struct sta_ampdu_mlme {
 struct sta_info {
        /* General information, mostly static */
        struct list_head list;
+       struct rcu_head rcu_head;
        struct sta_info __rcu *hnext;
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
@@ -297,6 +298,7 @@ struct sta_info {
        spinlock_t lock;
 
        struct work_struct drv_unblock_wk;
+       struct work_struct free_sta_wk;
 
        u16 listen_interval;
 
index 8cd72914cdaf2f3540268920b1ab0969f687d330..2ce89732d0f21755939699b55e3382ac98baeaca 100644 (file)
@@ -517,21 +517,41 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                u64 cookie = (unsigned long)skb;
+               bool found = false;
+
                acked = info->flags & IEEE80211_TX_STAT_ACK;
 
-               /*
-                * TODO: When we have non-netdev frame TX,
-                * we cannot use skb->dev->ieee80211_ptr
-                */
+               rcu_read_lock();
+
+               list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+                       if (!sdata->dev)
+                               continue;
+
+                       if (skb->dev != sdata->dev)
+                               continue;
 
-               if (ieee80211_is_nullfunc(hdr->frame_control) ||
-                   ieee80211_is_qos_nullfunc(hdr->frame_control))
-                       cfg80211_probe_status(skb->dev, hdr->addr1,
+                       found = true;
+                       break;
+               }
+
+               if (!skb->dev) {
+                       sdata = rcu_dereference(local->p2p_sdata);
+                       if (sdata)
+                               found = true;
+               }
+
+               if (!found)
+                       skb->dev = NULL;
+               else if (ieee80211_is_nullfunc(hdr->frame_control) ||
+                        ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+                       cfg80211_probe_status(sdata->dev, hdr->addr1,
                                              cookie, acked, GFP_ATOMIC);
-               else
-                       cfg80211_mgmt_tx_status(
-                               skb->dev->ieee80211_ptr, cookie, skb->data,
-                               skb->len, acked, GFP_ATOMIC);
+               } else {
+                       cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
+                                               skb->len, acked, GFP_ATOMIC);
+               }
+
+               rcu_read_unlock();
        }
 
        if (unlikely(info->ack_frame_id)) {
index c6d33b55b2dfd51602d7fc40dcbbbb0ef6a0a451..18d9c8a52e9e72d98686778bb7bcab974119e638 100644 (file)
@@ -24,7 +24,7 @@
                        __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
@@ -274,9 +274,12 @@ TRACE_EVENT(drv_config,
                __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
                __entry->max_sleep_period = local->hw.conf.max_sleep_period;
                __entry->listen_interval = local->hw.conf.listen_interval;
-               __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count;
-               __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
-               __entry->center_freq = local->hw.conf.channel->center_freq;
+               __entry->long_frame_max_tx_count =
+                       local->hw.conf.long_frame_max_tx_count;
+               __entry->short_frame_max_tx_count =
+                       local->hw.conf.short_frame_max_tx_count;
+               __entry->center_freq = local->hw.conf.channel ?
+                                       local->hw.conf.channel->center_freq : 0;
                __entry->channel_type = local->hw.conf.channel_type;
                __entry->smps = local->hw.conf.smps_mode;
        ),
index c5e8c9c31f7687d9922d0011ea1b31e8244ea8d2..e0e0d1d0e8301d4a8803b7af811e24d752347a88 100644 (file)
@@ -55,7 +55,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
        if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
                return 0;
 
-       sband = local->hw.wiphy->bands[tx->channel->band];
+       sband = local->hw.wiphy->bands[info->band];
        txrate = &sband->bitrates[info->control.rates[0].idx];
 
        erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -580,7 +580,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                                tx->key = NULL;
                        else
                                skip_hw = (tx->key->conf.flags &
-                                          IEEE80211_KEY_FLAG_SW_MGMT) &&
+                                          IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
                                        ieee80211_is_mgmt(hdr->frame_control);
                        break;
                case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -615,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        memset(&txrc, 0, sizeof(txrc));
 
-       sband = tx->local->hw.wiphy->bands[tx->channel->band];
+       sband = tx->local->hw.wiphy->bands[info->band];
 
        len = min_t(u32, tx->skb->len + FCS_LEN,
                         tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        txrc.bss_conf = &tx->sdata->vif.bss_conf;
        txrc.skb = tx->skb;
        txrc.reported_rate.idx = -1;
-       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
        if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
        memcpy(txrc.rate_idx_mcs_mask,
-              tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
+              tx->sdata->rc_rateidx_mcs_mask[info->band],
               sizeof(txrc.rate_idx_mcs_mask));
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
                 "scanning and associated. Target station: "
                 "%pM on %d GHz band\n",
                 tx->sdata->name, hdr->addr1,
-                tx->channel->band ? 5 : 2))
+                info->band ? 5 : 2))
                return TX_DROP;
 
        /*
@@ -1131,7 +1131,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        tx->skb = skb;
        tx->local = local;
        tx->sdata = sdata;
-       tx->channel = local->hw.conf.channel;
        __skb_queue_head_init(&tx->skbs);
 
        /*
@@ -1204,6 +1203,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                               struct sk_buff_head *skbs,
                               bool txpending)
 {
+       struct ieee80211_tx_control control;
        struct sk_buff *skb, *tmp;
        unsigned long flags;
 
@@ -1240,10 +1240,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
                info->control.vif = vif;
-               info->control.sta = sta;
+               control.sta = sta;
 
                __skb_unlink(skb, skbs);
-               drv_tx(local, skb);
+               drv_tx(local, &control, skb);
        }
 
        return true;
@@ -1399,8 +1399,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       tx.channel = local->hw.conf.channel;
-       info->band = tx.channel->band;
+       info->band = local->hw.conf.channel->band;
 
        /* set up hw_queue value early */
        if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_info *info;
-       int ret = NETDEV_TX_BUSY, head_need;
+       int head_need;
        u16 ethertype, hdrlen,  meshhdrlen = 0;
        __le16 fc;
        struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        u32 info_flags = 0;
        u16 info_id = 0;
 
-       if (unlikely(skb->len < ETH_HLEN)) {
-               ret = NETDEV_TX_OK;
+       if (unlikely(skb->len < ETH_HLEN))
                goto fail;
-       }
 
        /* convert Ethernet header to proper 802.11 header (based on
         * operation mode) */
@@ -1787,7 +1784,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
                        /* Do not send frames with mesh_ttl == 0 */
                        sdata->u.mesh.mshstats.dropped_frames_ttl++;
-                       ret = NETDEV_TX_OK;
                        goto fail;
                }
                rcu_read_lock();
@@ -1874,10 +1870,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                if (tdls_direct) {
                        /* link during setup - throw out frames to peer */
-                       if (!tdls_auth) {
-                               ret = NETDEV_TX_OK;
+                       if (!tdls_auth)
                                goto fail;
-                       }
 
                        /* DA SA BSSID */
                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1911,7 +1905,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                hdrlen = 24;
                break;
        default:
-               ret = NETDEV_TX_OK;
                goto fail;
        }
 
@@ -1956,7 +1949,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
 
-               ret = NETDEV_TX_OK;
                goto fail;
        }
 
@@ -2011,10 +2003,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                skb = skb_clone(skb, GFP_ATOMIC);
                kfree_skb(tmp_skb);
 
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
+               if (!skb)
                        goto fail;
-               }
        }
 
        hdr.frame_control = fc;
@@ -2117,10 +2107,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
  fail:
-       if (ret == NETDEV_TX_OK)
-               dev_kfree_skb(skb);
-
-       return ret;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 
@@ -2295,12 +2283,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        struct ieee80211_sub_if_data *sdata = NULL;
        struct ieee80211_if_ap *ap = NULL;
        struct beacon_data *beacon;
-       struct ieee80211_supported_band *sband;
-       enum ieee80211_band band = local->hw.conf.channel->band;
+       enum ieee80211_band band = local->oper_channel->band;
        struct ieee80211_tx_rate_control txrc;
 
-       sband = local->hw.wiphy->bands[band];
-
        rcu_read_lock();
 
        sdata = vif_to_sdata(vif);
@@ -2410,7 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                memset(mgmt, 0, hdr_len);
                mgmt->frame_control =
                    cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-               memset(mgmt->da, 0xff, ETH_ALEN);
+               eth_broadcast_addr(mgmt->da);
                memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
                mgmt->u.beacon.beacon_int =
@@ -2422,9 +2407,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                *pos++ = WLAN_EID_SSID;
                *pos++ = 0x0;
 
-               if (ieee80211_add_srates_ie(sdata, skb, true) ||
+               if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
                    mesh_add_ds_params_ie(skb, sdata) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+                   ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
                    mesh_add_rsn_ie(skb, sdata) ||
                    mesh_add_ht_cap_ie(skb, sdata) ||
                    mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2447,12 +2432,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 
        memset(&txrc, 0, sizeof(txrc));
        txrc.hw = hw;
-       txrc.sband = sband;
+       txrc.sband = local->hw.wiphy->bands[band];
        txrc.bss_conf = &sdata->vif.bss_conf;
        txrc.skb = skb;
        txrc.reported_rate.idx = -1;
        txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
-       if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+       if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2476,7 +2461,8 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif)
 {
        struct ieee80211_if_ap *ap = NULL;
-       struct sk_buff *presp = NULL, *skb = NULL;
+       struct sk_buff *skb = NULL;
+       struct probe_resp *presp = NULL;
        struct ieee80211_hdr *hdr;
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
@@ -2490,10 +2476,12 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
        if (!presp)
                goto out;
 
-       skb = skb_copy(presp, GFP_ATOMIC);
+       skb = dev_alloc_skb(presp->len);
        if (!skb)
                goto out;
 
+       memcpy(skb_put(skb, presp->len), presp->data, presp->len);
+
        hdr = (struct ieee80211_hdr *) skb->data;
        memset(hdr->addr1, 0, sizeof(hdr->addr1));
 
@@ -2604,9 +2592,9 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
        memset(hdr, 0, sizeof(*hdr));
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                         IEEE80211_STYPE_PROBE_REQ);
-       memset(hdr->addr1, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr1);
        memcpy(hdr->addr2, vif->addr, ETH_ALEN);
-       memset(hdr->addr3, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr3);
 
        pos = skb_put(skb, ie_ssid_len);
        *pos++ = WLAN_EID_SSID;
@@ -2703,8 +2691,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
        info = IEEE80211_SKB_CB(skb);
 
        tx.flags |= IEEE80211_TX_PS_BUFFERED;
-       tx.channel = local->hw.conf.channel;
-       info->band = tx.channel->band;
+       info->band = local->oper_channel->band;
 
        if (invoke_tx_handlers(&tx))
                skb = NULL;
index 39b82fee4904784c87a635026e8ae0b7e2a4318d..22ca35054dd065753b9e7d6c4f3e5ab990903e83 100644 (file)
@@ -276,6 +276,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                int ac;
 
+               if (!sdata->dev)
+                       continue;
+
                if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
                        continue;
 
@@ -364,6 +367,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                int ac;
 
+               if (!sdata->dev)
+                       continue;
+
                for (ac = 0; ac < n_acs; ac++) {
                        if (sdata->vif.hw_queue[ac] == queue ||
                            sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                                elem_parse_failed = true;
                        break;
                case WLAN_EID_CHANNEL_SWITCH:
-                       elems->ch_switch_elem = pos;
-                       elems->ch_switch_elem_len = elen;
+                       if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+                               elem_parse_failed = true;
+                               break;
+                       }
+                       elems->ch_switch_ie = (void *)pos;
                        break;
                case WLAN_EID_QUIET:
                        if (!elems->quiet_elem) {
@@ -783,8 +792,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                        elems->country_elem_len = elen;
                        break;
                case WLAN_EID_PWR_CONSTRAINT:
+                       if (elen != 1) {
+                               elem_parse_failed = true;
+                               break;
+                       }
                        elems->pwr_constr_elem = pos;
-                       elems->pwr_constr_elem_len = elen;
                        break;
                case WLAN_EID_TIMEOUT_INTERVAL:
                        elems->timeout_int = pos;
@@ -832,7 +844,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
 
        memset(&qparam, 0, sizeof(qparam));
 
-       use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
+       use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
                 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
 
        /*
@@ -899,7 +911,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
                drv_conf_tx(local, sdata, ac, &qparam);
        }
 
-       if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+       if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+           sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
                sdata->vif.bss_conf.qos = enable_qos;
                if (bss_notify)
                        ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +932,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
                if ((supp_rates[i] & 0x7f) * 5 > 110)
                        have_higher_than_11mbit = 1;
 
-       if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+       if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
            have_higher_than_11mbit)
                sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
        else
@@ -994,6 +1007,45 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx_skb(sdata, skb);
 }
 
+void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
+                                   const u8 *bssid, u16 stype, u16 reason,
+                                   bool send_frame, u8 *frame_buf)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct sk_buff *skb;
+       struct ieee80211_mgmt *mgmt = (void *)frame_buf;
+
+       /* build frame */
+       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
+       mgmt->duration = 0; /* initialize only */
+       mgmt->seq_ctrl = 0; /* initialize only */
+       memcpy(mgmt->da, bssid, ETH_ALEN);
+       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+       memcpy(mgmt->bssid, bssid, ETH_ALEN);
+       /* u.deauth.reason_code == u.disassoc.reason_code */
+       mgmt->u.deauth.reason_code = cpu_to_le16(reason);
+
+       if (send_frame) {
+               skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                                   IEEE80211_DEAUTH_FRAME_LEN);
+               if (!skb)
+                       return;
+
+               skb_reserve(skb, local->hw.extra_tx_headroom);
+
+               /* copy in frame */
+               memcpy(skb_put(skb, IEEE80211_DEAUTH_FRAME_LEN),
+                      mgmt, IEEE80211_DEAUTH_FRAME_LEN);
+
+               if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+                   !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
+                       IEEE80211_SKB_CB(skb)->flags |=
+                               IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+               ieee80211_tx_skb(sdata, skb);
+       }
+}
+
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             const u8 *ie, size_t ie_len,
                             enum ieee80211_band band, u32 rate_mask,
@@ -1100,6 +1152,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
 
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
+                                         struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed)
@@ -1109,7 +1162,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgmt *mgmt;
        size_t buf_len;
        u8 *buf;
-       u8 chan;
+       u8 chan_no;
 
        /* FIXME: come up with a proper value */
        buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1175,12 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
         * badly-behaved APs don't respond when this parameter is included.
         */
        if (directed)
-               chan = 0;
+               chan_no = 0;
        else
-               chan = ieee80211_frequency_to_channel(
-                       local->hw.conf.channel->center_freq);
+               chan_no = ieee80211_frequency_to_channel(chan->center_freq);
 
-       buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
-                                          local->hw.conf.channel->band,
-                                          ratemask, chan);
+       buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
+                                          ratemask, chan_no);
 
        skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
                                     ssid, ssid_len,
@@ -1154,11 +1205,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck)
+                             u32 ratemask, bool directed, bool no_cck,
+                             struct ieee80211_channel *channel)
 {
        struct sk_buff *skb;
 
-       skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
+       skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
+                                       ssid, ssid_len,
                                        ie, ie_len, directed);
        if (skb) {
                if (no_cck)
@@ -1359,7 +1412,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC |
-                                  BSS_CHANGED_ARP_FILTER;
+                                  BSS_CHANGED_ARP_FILTER |
+                                  BSS_CHANGED_PS;
                        mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
                        mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1439,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                case NL80211_IFTYPE_MONITOR:
                        /* ignore virtual */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       changed = BSS_CHANGED_IDLE;
+                       break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
                case NL80211_IFTYPE_P2P_CLIENT:
@@ -1549,14 +1606,13 @@ static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
        return 0;
 }
 
-/* must hold iflist_mtx */
 void ieee80211_recalc_smps(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
        enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
        int count = 0;
 
-       lockdep_assert_held(&local->iflist_mtx);
+       mutex_lock(&local->iflist_mtx);
 
        /*
         * This function could be improved to handle multiple
@@ -1571,6 +1627,8 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
                        goto set;
 
@@ -1583,12 +1641,14 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
        }
 
        if (smps_mode == local->smps_mode)
-               return;
+               goto unlock;
 
  set:
        local->smps_mode = smps_mode;
        /* changed flag is auto-detected for this */
        ieee80211_hw_config(local, 0);
+ unlock:
+       mutex_unlock(&local->iflist_mtx);
 }
 
 static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
@@ -1809,7 +1869,8 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
 }
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-                           struct sk_buff *skb, bool need_basic)
+                           struct sk_buff *skb, bool need_basic,
+                           enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -1817,7 +1878,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
        u8 i, rates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[band];
        rates = sband->n_bitrates;
        if (rates > 8)
                rates = 8;
@@ -1840,7 +1901,8 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
 }
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-                               struct sk_buff *skb, bool need_basic)
+                               struct sk_buff *skb, bool need_basic,
+                               enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -1848,7 +1910,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[band];
        exrates = sband->n_bitrates;
        if (exrates > 8)
                exrates -= 8;
index c19b214ffd571776434c2400eb1d906fc35d2589..fefa514b99170aae55620e4007565b0ad3e814b4 100644 (file)
@@ -356,6 +356,55 @@ config NETFILTER_NETLINK_QUEUE_CT
          If this option is enabled, NFQUEUE can include Connection Tracking
          information together with the packet if it is enqueued via NFNETLINK.
 
+config NF_NAT
+       tristate
+
+config NF_NAT_NEEDED
+       bool
+       depends on NF_NAT
+       default y
+
+config NF_NAT_PROTO_DCCP
+       tristate
+       depends on NF_NAT && NF_CT_PROTO_DCCP
+       default NF_NAT && NF_CT_PROTO_DCCP
+
+config NF_NAT_PROTO_UDPLITE
+       tristate
+       depends on NF_NAT && NF_CT_PROTO_UDPLITE
+       default NF_NAT && NF_CT_PROTO_UDPLITE
+
+config NF_NAT_PROTO_SCTP
+       tristate
+       default NF_NAT && NF_CT_PROTO_SCTP
+       depends on NF_NAT && NF_CT_PROTO_SCTP
+       select LIBCRC32C
+
+config NF_NAT_AMANDA
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_AMANDA
+
+config NF_NAT_FTP
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_FTP
+
+config NF_NAT_IRC
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_IRC
+
+config NF_NAT_SIP
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_SIP
+
+config NF_NAT_TFTP
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_TFTP
+
 endif # NF_CONNTRACK
 
 # transparent proxy support
@@ -599,6 +648,16 @@ config NETFILTER_XT_TARGET_MARK
        (e.g. when running oldconfig). It selects
        CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
 
+config NETFILTER_XT_TARGET_NETMAP
+       tristate '"NETMAP" target support'
+       depends on NF_NAT
+       ---help---
+       NETMAP is an implementation of static 1:1 NAT mapping of network
+       addresses. It maps the network address part, while keeping the host
+       address part intact.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_NFLOG
        tristate '"NFLOG" target support'
        default m if NETFILTER_ADVANCED=n
@@ -621,19 +680,6 @@ config NETFILTER_XT_TARGET_NFQUEUE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config NETFILTER_XT_TARGET_NOTRACK
-       tristate  '"NOTRACK" target support'
-       depends on IP_NF_RAW || IP6_NF_RAW
-       depends on NF_CONNTRACK
-       help
-         The NOTRACK target allows a select rule to specify
-         which packets *not* to enter the conntrack/NAT
-         subsystem with all the consequences (no ICMP error tracking,
-         no protocol helpers for the selected packets).
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
-
 config NETFILTER_XT_TARGET_RATEEST
        tristate '"RATEEST" target support'
        depends on NETFILTER_ADVANCED
@@ -644,6 +690,17 @@ config NETFILTER_XT_TARGET_RATEEST
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_REDIRECT
+       tristate "REDIRECT target support"
+       depends on NF_NAT
+       ---help---
+       REDIRECT is a special case of NAT: all incoming connections are
+       mapped onto the incoming interface's address, causing the packets to
+       come to the local machine instead of passing through. This is
+       useful for transparent proxies.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_TEE
        tristate '"TEE" - packet cloning to alternate destination'
        depends on NETFILTER_ADVANCED
index 1c5160f2278e6306f4bcb0de34e349c7c8efc60c..32596978df1d9bb0b92f1b0b1b81c4adf7204c94 100644 (file)
@@ -43,6 +43,23 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
 obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
 
+nf_nat-y       := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
+                  nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
+
+obj-$(CONFIG_NF_NAT) += nf_nat.o
+
+# NAT protocols (nf_nat)
+obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
+obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
+obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
+
+# NAT helpers
+obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
+obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
+obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
+obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
+obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
+
 # transparent proxy support
 obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
 
@@ -53,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
 obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
+obj-$(CONFIG_NF_NAT) += xt_nat.o
 
 # targets
 obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
@@ -65,10 +83,11 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_RATEEST) += xt_RATEEST.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_REDIRECT) += xt_REDIRECT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
index 0bc6b60db4df2fda1a7cd2f1b57747f655d046a3..68912dadf13d15c0354bec108039176b9659c75a 100644 (file)
@@ -126,7 +126,7 @@ unsigned int nf_iterate(struct list_head *head,
                        unsigned int hook,
                        const struct net_device *indev,
                        const struct net_device *outdev,
-                       struct list_head **i,
+                       struct nf_hook_ops **elemp,
                        int (*okfn)(struct sk_buff *),
                        int hook_thresh)
 {
@@ -136,22 +136,20 @@ unsigned int nf_iterate(struct list_head *head,
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
-       list_for_each_continue_rcu(*i, head) {
-               struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
-
-               if (hook_thresh > elem->priority)
+       list_for_each_entry_continue_rcu((*elemp), head, list) {
+               if (hook_thresh > (*elemp)->priority)
                        continue;
 
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
 repeat:
-               verdict = elem->hook(hook, skb, indev, outdev, okfn);
+               verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
-                                       elem->hook, hook);
+                                       (*elemp)->hook, hook);
                                continue;
                        }
 #endif
@@ -172,14 +170,14 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
                 int (*okfn)(struct sk_buff *),
                 int hook_thresh)
 {
-       struct list_head *elem;
+       struct nf_hook_ops *elem;
        unsigned int verdict;
        int ret = 0;
 
        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();
 
-       elem = &nf_hooks[pf][hook];
+       elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
 next_hook:
        verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
                             outdev, &elem, okfn, hook_thresh);
@@ -273,6 +271,11 @@ EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
 
 #endif /* CONFIG_NF_CONNTRACK */
 
+#ifdef CONFIG_NF_NAT_NEEDED
+void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
+EXPORT_SYMBOL(nf_nat_decode_session_hook);
+#endif
+
 #ifdef CONFIG_PROC_FS
 struct proc_dir_entry *proc_net_netfilter;
 EXPORT_SYMBOL(proc_net_netfilter);
index 7e1b061aeeba4c14cb45785b2975b353ab18a314..4a92fd47bd4cec1a6726b297e78e604f7d9e61e5 100644 (file)
 #define IP_SET_BITMAP_TIMEOUT
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 /* Type structure */
@@ -284,7 +287,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -454,7 +457,8 @@ static int
 bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 {
        struct bitmap_ip *map;
-       u32 first_ip, last_ip, hosts, elements;
+       u32 first_ip, last_ip, hosts;
+       u64 elements;
        u8 netmask = 32;
        int ret;
 
@@ -497,7 +501,7 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 
        if (netmask == 32) {
                hosts = 1;
-               elements = last_ip - first_ip + 1;
+               elements = (u64)last_ip - first_ip + 1;
        } else {
                u8 mask_bits;
                u32 mask;
@@ -515,7 +519,8 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
        if (elements > IPSET_BITMAP_MAX_RANGE + 1)
                return -IPSET_ERR_BITMAP_RANGE_SIZE;
 
-       pr_debug("hosts %u, elements %u\n", hosts, elements);
+       pr_debug("hosts %u, elements %llu\n",
+                hosts, (unsigned long long)elements);
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
@@ -554,8 +559,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_IPV4,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = bitmap_ip_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
index d7eaf10edb6d311f26a0b1e2bceac856c1085ae8..0f92dc24cb894bfa0674c2b3f6dca970d7615c64 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 enum {
@@ -320,11 +323,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,
                    (elem->match == MAC_FILLED &&
                     nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
                             elem->ether)))
-                   goto nla_put_failure;
+                       goto nla_put_failure;
                timeout = elem->match == MAC_UNSET ? elem->timeout
                                : ip_set_timeout_get(elem->timeout);
                if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
-                   goto nla_put_failure;
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, atd);
@@ -557,7 +560,8 @@ static int
 bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
                    u32 flags)
 {
-       u32 first_ip, last_ip, elements;
+       u32 first_ip, last_ip;
+       u64 elements;
        struct bitmap_ipmac *map;
        int ret;
 
@@ -588,7 +592,7 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
        } else
                return -IPSET_ERR_PROTOCOL;
 
-       elements = last_ip - first_ip + 1;
+       elements = (u64)last_ip - first_ip + 1;
 
        if (elements > IPSET_BITMAP_MAX_RANGE + 1)
                return -IPSET_ERR_BITMAP_RANGE_SIZE;
@@ -629,8 +633,8 @@ static struct ip_set_type bitmap_ipmac_type = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_MAC,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_IPV4,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = bitmap_ipmac_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
index b9f1fce7053b29d9fb761d634a581d2d8b8ef1d2..e6b2db76f4c3faeb4f4c5570a19a9cea0a852f0e 100644 (file)
 #define IP_SET_BITMAP_TIMEOUT
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("bitmap:port type of IP sets");
+IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_bitmap:port");
 
 /* Type structure */
@@ -487,8 +490,8 @@ static struct ip_set_type bitmap_port_type = {
        .features       = IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = bitmap_port_create,
        .create_policy  = {
                [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
index 9730882697aaedbab0beee66f0f12a654b97b63a..778465f217fa975c39af6fb3d7429214dbfe2a51 100644 (file)
@@ -69,7 +69,8 @@ find_set_type(const char *name, u8 family, u8 revision)
 
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
                if (STREQ(type->name, name) &&
-                   (type->family == family || type->family == NFPROTO_UNSPEC) &&
+                   (type->family == family ||
+                    type->family == NFPROTO_UNSPEC) &&
                    revision >= type->revision_min &&
                    revision <= type->revision_max)
                        return type;
@@ -149,7 +150,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
        rcu_read_lock();
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
                if (STREQ(type->name, name) &&
-                   (type->family == family || type->family == NFPROTO_UNSPEC)) {
+                   (type->family == family ||
+                    type->family == NFPROTO_UNSPEC)) {
                        found = true;
                        if (type->revision_min < *min)
                                *min = type->revision_min;
@@ -368,6 +370,12 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
                set->variant->kadt(set, skb, par, IPSET_ADD, opt);
                write_unlock_bh(&set->lock);
                ret = 1;
+       } else {
+               /* --return-nomatch: invert matched element */
+               if ((opt->flags & IPSET_RETURN_NOMATCH) &&
+                   (set->type->features & IPSET_TYPE_NOMATCH) &&
+                   (ret > 0 || ret == -ENOTEMPTY))
+                       ret = -ret;
        }
 
        /* Convert error codes to nomatch */
@@ -563,13 +571,13 @@ flag_exist(const struct nlmsghdr *nlh)
 }
 
 static struct nlmsghdr *
-start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
          enum ipset_cmd cmd)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
 
-       nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+       nlh = nlmsg_put(skb, portid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
                        sizeof(*nfmsg), flags);
        if (nlh == NULL)
                return NULL;
@@ -721,7 +729,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * by the nfnl mutex. Find the first free index in ip_set_list
         * and check clashing.
         */
-       if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
+       ret = find_free_id(set->name, &index, &clash);
+       if (ret != 0) {
                /* If this is the same set and requested, ignore error */
                if (ret == -EEXIST &&
                    (flags & IPSET_FLAG_EXIST) &&
@@ -1045,7 +1054,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        ip_set_id_t index = IPSET_INVALID_ID, max;
        struct ip_set *set = NULL;
        struct nlmsghdr *nlh = NULL;
-       unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+       unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
        u32 dump_type, dump_flags;
        int ret = 0;
 
@@ -1093,7 +1102,7 @@ dump_last:
                        pr_debug("reference set\n");
                        __ip_set_get(index);
                }
-               nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+               nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, flags,
                                IPSET_CMD_LIST);
                if (!nlh) {
@@ -1226,7 +1235,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
                skb2 = nlmsg_new(payload, GFP_KERNEL);
                if (skb2 == NULL)
                        return -ENOMEM;
-               rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+               rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
                                  nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
                errmsg = nlmsg_data(rep);
                errmsg->error = ret;
@@ -1241,7 +1250,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
 
                *errline = lineno;
 
-               netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+               netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
                /* Signal netlink not to send its ACK/errmsg.  */
                return -EINTR;
        }
@@ -1416,7 +1425,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                         IPSET_CMD_HEADER);
        if (!nlh2)
                goto nlmsg_failure;
@@ -1428,7 +1437,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
                goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
-       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (ret < 0)
                return ret;
 
@@ -1476,7 +1485,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                         IPSET_CMD_TYPE);
        if (!nlh2)
                goto nlmsg_failure;
@@ -1489,7 +1498,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
        nlmsg_end(skb2, nlh2);
 
        pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
-       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (ret < 0)
                return ret;
 
@@ -1525,7 +1534,7 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                         IPSET_CMD_PROTOCOL);
        if (!nlh2)
                goto nlmsg_failure;
@@ -1533,7 +1542,7 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
                goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
-       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (ret < 0)
                return ret;
 
index a68dbd4f1e4e4404d25d6b71e321122fd4fae66a..ec3dba5dcd62f081c1749fb60f2aa71cd31d11ad 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip type of IP sets");
+IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip");
 
 /* Type specific function prefix */
@@ -114,7 +117,7 @@ nla_put_failure:
 static inline void
 hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
+       h->next.ip = d->ip;
 }
 
 static int
@@ -179,7 +182,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -188,7 +191,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip += hosts) {
                nip = htonl(ip);
                if (nip == 0)
@@ -452,8 +455,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 92722bb82eea65cde13c77ce78be136fca2a8b94..0171f7502fa58d035fcda2361a6c4968acf09b7b 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   1 /* SCTP and UDPLITE support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port");
 
 /* Type specific function prefix */
@@ -130,8 +133,8 @@ static inline void
 hash_ipport4_data_next(struct ip_set_hash *h,
                       const struct hash_ipport4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
 }
 
 static int
@@ -217,7 +220,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -231,9 +234,10 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip++) {
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.ip = htonl(ip);
                        data.port = htons(p);
@@ -349,7 +353,7 @@ static inline void
 hash_ipport6_data_next(struct ip_set_hash *h,
                       const struct hash_ipport6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -431,7 +435,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -522,8 +526,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 1,    /* SCTP and UDPLITE support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ipport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 0637ce096def10e4faad6ef18003e263c8e8b77c..6344ef551ec811208b79ddc54c89a1270c2419cd 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   1 /* SCTP and UDPLITE support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,ip");
 
 /* Type specific function prefix */
@@ -133,8 +136,8 @@ static inline void
 hash_ipportip4_data_next(struct ip_set_hash *h,
                         const struct hash_ipportip4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
 }
 
 static int
@@ -225,7 +228,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -239,9 +242,10 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip++) {
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.ip = htonl(ip);
                        data.port = htons(p);
@@ -362,7 +366,7 @@ static inline void
 hash_ipportip6_data_next(struct ip_set_hash *h,
                         const struct hash_ipportip6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -449,7 +453,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -540,8 +544,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 1,    /* SCTP and UDPLITE support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ipportip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 1ce21ca976e17bb077f1e790677cc7af99f7c7aa..cb71f9a774e7d50d67998aaaadc199a563f43f40 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    SCTP and UDPLITE support added */
+/*                     2    Range as input support for IPv4 added */
+#define REVISION_MAX   3 /* nomatch flag support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,net");
 
 /* Type specific function prefix */
@@ -99,10 +104,10 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -173,9 +178,9 @@ static inline void
 hash_ipportnet4_data_next(struct ip_set_hash *h,
                          const struct hash_ipportnet4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
-       h->next.ip2 = ntohl(d->ip2);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
+       h->next.ip2 = d->ip2;
 }
 
 static int
@@ -290,7 +295,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
@@ -314,14 +319,17 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip++) {
                data.ip = htonl(ip);
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.port = htons(p);
-                       ip2 = retried && ip == h->next.ip && p == h->next.port
-                               ? h->next.ip2 : ip2_from;
+                       ip2 = retried
+                             && ip == ntohl(h->next.ip)
+                             && p == ntohs(h->next.port)
+                               ? ntohl(h->next.ip2) : ip2_from;
                        while (!after(ip2, ip2_to)) {
                                data.ip2 = htonl(ip2);
                                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
@@ -403,10 +411,10 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -486,7 +494,7 @@ static inline void
 hash_ipportnet6_data_next(struct ip_set_hash *h,
                          const struct hash_ipportnet6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -598,7 +606,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -689,13 +697,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_ipportnet_type __read_mostly = {
        .name           = "hash:ip,port,net",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
+                         IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       /*                1        SCTP and UDPLITE support added */
-       /*                2        Range as input support for IPv4 added */
-       .revision_max   = 3,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ipportnet_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index c57a6a09906d7df3338f900d2b3b7321f487b103..29e94b981f3f3fe4853a9683a1cbecdfc9d7ec42 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    Range as input support for IPv4 added */
+#define REVISION_MAX   2 /* nomatch flag support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:net type of IP sets");
+IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:net");
 
 /* Type specific function prefix */
@@ -86,10 +90,10 @@ hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_net4_data_match(const struct hash_net4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -152,7 +156,7 @@ static inline void
 hash_net4_data_next(struct ip_set_hash *h,
                    const struct hash_net4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
+       h->next.ip = d->ip;
 }
 
 static int
@@ -235,7 +239,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
                        return -IPSET_ERR_HASH_RANGE;
        }
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        while (!after(ip, ip_to)) {
                data.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
@@ -307,10 +311,10 @@ hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_net6_data_match(const struct hash_net6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -532,12 +536,11 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_net_type __read_mostly = {
        .name           = "hash:net",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       /*              = 1        Range as input support for IPv4 added */
-       .revision_max   = 2,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_net_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index d5d3607ae7bcf5e9704bd189217115cf048aee4b..b9a63381e34998e08ab5271f7d82cca9058a76d8 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    nomatch flag support added */
+#define REVISION_MAX   2 /* /0 support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:net,iface type of IP sets");
+IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:net,iface");
 
 /* Interface name rbtree */
@@ -140,7 +144,7 @@ struct hash_netiface4_elem_hashed {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
 };
 
 #define HKEY_DATALEN   sizeof(struct hash_netiface4_elem_hashed)
@@ -151,7 +155,7 @@ struct hash_netiface4_elem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
 };
 
@@ -161,7 +165,7 @@ struct hash_netiface4_telem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
        unsigned long timeout;
 };
@@ -181,18 +185,14 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
 static inline bool
 hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
 {
-       return elem->cidr == 0;
+       return elem->elem == 0;
 }
 
 static inline void
 hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
                         const struct hash_netiface4_elem *src)
 {
-       dst->ip = src->ip;
-       dst->cidr = src->cidr;
-       dst->physdev = src->physdev;
-       dst->iface = src->iface;
-       dst->nomatch = src->nomatch;
+       memcpy(dst, src, sizeof(*dst));
 }
 
 static inline void
@@ -201,10 +201,10 @@ hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -217,7 +217,7 @@ hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
 static inline void
 hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem)
 {
-       elem->cidr = 0;
+       elem->elem = 0;
 }
 
 static bool
@@ -277,7 +277,7 @@ static inline void
 hash_netiface4_data_next(struct ip_set_hash *h,
                         const struct hash_netiface4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
+       h->next.ip = d->ip;
 }
 
 static int
@@ -288,7 +288,8 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem data = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .elem = 1,
        };
        int ret;
 
@@ -339,7 +340,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netiface4_elem data = { .cidr = HOST_MASK };
+       struct hash_netiface4_elem data = { .cidr = HOST_MASK, .elem = 1 };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
        char iface[IFNAMSIZ];
@@ -360,7 +361,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR]) {
                data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-               if (!data.cidr || data.cidr > HOST_MASK)
+               if (data.cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
@@ -389,7 +390,6 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
                        flags |= (cadt_flags << 16);
        }
-
        if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
                data.ip = htonl(ip & ip_set_hostmask(data.cidr));
                ret = adtfn(set, &data, timeout, flags);
@@ -409,7 +409,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        while (!after(ip, ip_to)) {
                data.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
@@ -442,7 +442,7 @@ struct hash_netiface6_elem_hashed {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
 };
 
 #define HKEY_DATALEN   sizeof(struct hash_netiface6_elem_hashed)
@@ -452,7 +452,7 @@ struct hash_netiface6_elem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
 };
 
@@ -461,7 +461,7 @@ struct hash_netiface6_telem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
        unsigned long timeout;
 };
@@ -481,7 +481,7 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
 static inline bool
 hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem)
 {
-       return elem->cidr == 0;
+       return elem->elem == 0;
 }
 
 static inline void
@@ -497,16 +497,16 @@ hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
 hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
 {
-       elem->cidr = 0;
+       elem->elem = 0;
 }
 
 static inline void
@@ -590,7 +590,8 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .elem = 1,
        };
        int ret;
 
@@ -637,7 +638,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netiface6_elem data = { .cidr = HOST_MASK };
+       struct hash_netiface6_elem data = { .cidr = HOST_MASK, .elem = 1 };
        u32 timeout = h->timeout;
        char iface[IFNAMSIZ];
        int ret;
@@ -659,7 +660,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR])
                data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-       if (!data.cidr || data.cidr > HOST_MASK)
+       if (data.cidr > HOST_MASK)
                return -IPSET_ERR_INVALID_CIDR;
        ip6_netmask(&data.ip, data.cidr);
 
@@ -773,11 +774,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_netiface_type __read_mostly = {
        .name           = "hash:net,iface",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_IFACE |
+                         IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 1,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_netiface_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index fc3143a2d41bbdd07747ede2339ecf8407911cc0..7ef700de596c54d09f8873a4498da207bfd5a828 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    SCTP and UDPLITE support added */
+/*                     2    Range as input support for IPv4 added */
+#define REVISION_MAX   3 /* nomatch flag support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:net,port type of IP sets");
+IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:net,port");
 
 /* Type specific function prefix */
@@ -99,10 +104,10 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_netport4_data_match(const struct hash_netport4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -171,8 +176,8 @@ static inline void
 hash_netport4_data_next(struct ip_set_hash *h,
                        const struct hash_netport4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
 }
 
 static int
@@ -289,12 +294,13 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        while (!after(ip, ip_to)) {
                data.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &cidr);
                data.cidr = cidr - 1;
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.port = htons(p);
                        ret = adtfn(set, &data, timeout, flags);
@@ -369,10 +375,10 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_netport6_data_match(const struct hash_netport6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -450,7 +456,7 @@ static inline void
 hash_netport6_data_next(struct ip_set_hash *h,
                        const struct hash_netport6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -554,7 +560,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -644,13 +650,11 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_netport_type __read_mostly = {
        .name           = "hash:net,port",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       /*                1        SCTP and UDPLITE support added */
-       /*                2,       Range as input support for IPv4 added */
-       .revision_max   = 3,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_netport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 6cb1225765f952667b0f4c763e635461fcb94ad0..8371c2bac2e4240eb5c4b3f6abd0faa48f6212ce 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_list.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("list:set type of IP sets");
+IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_list:set");
 
 /* Member elements without and with timeout */
@@ -579,8 +582,8 @@ static struct ip_set_type list_set_type __read_mostly = {
        .features       = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = list_set_create,
        .create_policy  = {
                [IPSET_ATTR_SIZE]       = { .type = NLA_U32 },
index f9871385a65eddca7f9b942f0c83794c5c147339..8b2cffdfdd9985e7397a0b800d5e2c28af20a4b7 100644 (file)
@@ -250,7 +250,8 @@ comment 'IPVS application helper'
 
 config IP_VS_FTP
        tristate "FTP protocol helper"
-        depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT
+       depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT && \
+               NF_CONNTRACK_FTP
        select IP_VS_NFCT
        ---help---
          FTP is a protocol that transfers IP address and/or port number in
index 64f9e8f13207e94463240b330abc668fd607df70..9713e6e86d472f2e2aefc5c4ebfd8f95a5cda62c 100644 (file)
@@ -180,22 +180,38 @@ register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
 }
 
 
-/*
- *     ip_vs_app registration routine
- */
-int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
+/* Register application for netns */
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
-       /* increase the module use count */
-       ip_vs_use_count_inc();
+       struct ip_vs_app *a;
+       int err = 0;
+
+       if (!ipvs)
+               return ERR_PTR(-ENOENT);
 
        mutex_lock(&__ip_vs_app_mutex);
 
-       list_add(&app->a_list, &ipvs->app_list);
+       list_for_each_entry(a, &ipvs->app_list, a_list) {
+               if (!strcmp(app->name, a->name)) {
+                       err = -EEXIST;
+                       goto out_unlock;
+               }
+       }
+       a = kmemdup(app, sizeof(*app), GFP_KERNEL);
+       if (!a) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+       INIT_LIST_HEAD(&a->incs_list);
+       list_add(&a->a_list, &ipvs->app_list);
+       /* increase the module use count */
+       ip_vs_use_count_inc();
 
+out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);
 
-       return 0;
+       return err ? ERR_PTR(err) : a;
 }
 
 
@@ -205,20 +221,29 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
  */
 void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
-       struct ip_vs_app *inc, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_app *a, *anxt, *inc, *nxt;
+
+       if (!ipvs)
+               return;
 
        mutex_lock(&__ip_vs_app_mutex);
 
-       list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-               ip_vs_app_inc_release(net, inc);
-       }
+       list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
+               if (app && strcmp(app->name, a->name))
+                       continue;
+               list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
+                       ip_vs_app_inc_release(net, inc);
+               }
 
-       list_del(&app->a_list);
+               list_del(&a->a_list);
+               kfree(a);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+               /* decrease the module use count */
+               ip_vs_use_count_dec();
+       }
 
-       /* decrease the module use count */
-       ip_vs_use_count_dec();
+       mutex_unlock(&__ip_vs_app_mutex);
 }
 
 
@@ -586,5 +611,6 @@ int __net_init ip_vs_app_net_init(struct net *net)
 
 void __net_exit ip_vs_app_net_cleanup(struct net *net)
 {
+       unregister_ip_vs_app(net, NULL /* all */);
        proc_net_remove(net, "ip_vs_app");
 }
index b54eccef40b5cf7ecb74a1c7f1950f48d7823413..58918e20f9d5b038c2181b893b0e0458dbd3a8f2 100644 (file)
@@ -1303,7 +1303,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
        struct ip_vs_proto_data *pd;
-       unsigned int offset, ihl, verdict;
+       unsigned int offset, offset2, ihl, verdict;
+       bool ipip;
 
        *related = 1;
 
@@ -1345,6 +1346,21 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        net = skb_net(skb);
 
+       /* Special case for errors for IPIP packets */
+       ipip = false;
+       if (cih->protocol == IPPROTO_IPIP) {
+               if (unlikely(cih->frag_off & htons(IP_OFFSET)))
+                       return NF_ACCEPT;
+               /* Error for our IPIP must arrive at LOCAL_IN */
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
+                       return NF_ACCEPT;
+               offset += cih->ihl * 4;
+               cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
+               if (cih == NULL)
+                       return NF_ACCEPT; /* The packet looks wrong, ignore */
+               ipip = true;
+       }
+
        pd = ip_vs_proto_data_get(net, cih->protocol);
        if (!pd)
                return NF_ACCEPT;
@@ -1358,11 +1374,14 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
                      "Checking incoming ICMP for");
 
+       offset2 = offset;
        offset += cih->ihl * 4;
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
-       /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+       /* The embedded headers contain source and dest in reverse order.
+        * For IPIP this is error for request, not for reply.
+        */
+       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -1376,6 +1395,57 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
                goto out;
        }
 
+       if (ipip) {
+               __be32 info = ic->un.gateway;
+
+               /* Update the MTU */
+               if (ic->type == ICMP_DEST_UNREACH &&
+                   ic->code == ICMP_FRAG_NEEDED) {
+                       struct ip_vs_dest *dest = cp->dest;
+                       u32 mtu = ntohs(ic->un.frag.mtu);
+
+                       /* Strip outer IP and ICMP, go to IPIP header */
+                       __skb_pull(skb, ihl + sizeof(_icmph));
+                       offset2 -= ihl + sizeof(_icmph);
+                       skb_reset_network_header(skb);
+                       IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
+                               &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
+                       rcu_read_lock();
+                       ipv4_update_pmtu(skb, dev_net(skb->dev),
+                                        mtu, 0, 0, 0, 0);
+                       rcu_read_unlock();
+                       /* Client uses PMTUD? */
+                       if (!(cih->frag_off & htons(IP_DF)))
+                               goto ignore_ipip;
+                       /* Prefer the resulting PMTU */
+                       if (dest) {
+                               spin_lock(&dest->dst_lock);
+                               if (dest->dst_cache)
+                                       mtu = dst_mtu(dest->dst_cache);
+                               spin_unlock(&dest->dst_lock);
+                       }
+                       if (mtu > 68 + sizeof(struct iphdr))
+                               mtu -= sizeof(struct iphdr);
+                       info = htonl(mtu);
+               }
+               /* Strip outer IP, ICMP and IPIP, go to IP header of
+                * original request.
+                */
+               __skb_pull(skb, offset2);
+               skb_reset_network_header(skb);
+               IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
+                       &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+                       ic->type, ic->code, ntohl(info));
+               icmp_send(skb, ic->type, ic->code, info);
+               /* ICMP can be shorter but anyways, account it */
+               ip_vs_out_stats(cp, skb);
+
+ignore_ipip:
+               consume_skb(skb);
+               verdict = NF_STOLEN;
+               goto out;
+       }
+
        /* do the statistics and put it back */
        ip_vs_in_stats(cp, skb);
        if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
index f51013c07b9f4e5a81885f0462d5423ccbc8a44f..7e7198b51c068a7ea10446af4d390a9e6e2044f4 100644 (file)
@@ -539,8 +539,7 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
         * Remove it from the rs_table table.
         */
        if (!list_empty(&dest->d_list)) {
-               list_del(&dest->d_list);
-               INIT_LIST_HEAD(&dest->d_list);
+               list_del_init(&dest->d_list);
        }
 
        return 1;
@@ -1803,6 +1802,12 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "pmtu_disc",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
@@ -2933,7 +2938,7 @@ static int ip_vs_genl_dump_service(struct sk_buff *skb,
 {
        void *hdr;
 
-       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &ip_vs_genl_family, NLM_F_MULTI,
                          IPVS_CMD_NEW_SERVICE);
        if (!hdr)
@@ -3122,7 +3127,7 @@ static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
 {
        void *hdr;
 
-       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &ip_vs_genl_family, NLM_F_MULTI,
                          IPVS_CMD_NEW_DEST);
        if (!hdr)
@@ -3251,7 +3256,7 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
                                  struct netlink_callback *cb)
 {
        void *hdr;
-       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &ip_vs_genl_family, NLM_F_MULTI,
                          IPVS_CMD_NEW_DAEMON);
        if (!hdr)
@@ -3678,7 +3683,7 @@ static void ip_vs_genl_unregister(void)
  * per netns intit/exit func.
  */
 #ifdef CONFIG_SYSCTL
-int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
        int idx;
        struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3729,6 +3734,8 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
        tbl[idx++].data = &ipvs->sysctl_sync_retries;
        tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+       ipvs->sysctl_pmtu_disc = 1;
+       tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
 
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
@@ -3746,7 +3753,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        return 0;
 }
 
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3757,8 +3764,8 @@ void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 
 #else
 
-int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
index b20b29c903efdc0a0593f90547661446c58b47e6..4f53a5f04437b4d75c9a12e672ff9beabf69c5c2 100644 (file)
@@ -268,6 +268,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                         * packet.
                         */
                        ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+                                                      iph->ihl * 4,
                                                       start-data, end-start,
                                                       buf, buf_len);
                        if (ret) {
@@ -441,16 +442,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
 
        if (!ipvs)
                return -ENOENT;
-       app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
-       if (!app)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&app->a_list);
-       INIT_LIST_HEAD(&app->incs_list);
-       ipvs->ftp_app = app;
 
-       ret = register_ip_vs_app(net, app);
-       if (ret)
-               goto err_exit;
+       app = register_ip_vs_app(net, &ip_vs_ftp);
+       if (IS_ERR(app))
+               return PTR_ERR(app);
 
        for (i = 0; i < ports_count; i++) {
                if (!ports[i])
@@ -464,9 +459,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
        return 0;
 
 err_unreg:
-       unregister_ip_vs_app(net, app);
-err_exit:
-       kfree(ipvs->ftp_app);
+       unregister_ip_vs_app(net, &ip_vs_ftp);
        return ret;
 }
 /*
@@ -474,10 +467,7 @@ err_exit:
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-       struct netns_ipvs *ipvs = net_ipvs(net);
-
-       unregister_ip_vs_app(net, ipvs->ftp_app);
-       kfree(ipvs->ftp_app);
+       unregister_ip_vs_app(net, &ip_vs_ftp);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
index 65b616ae1716366f6437b9e760b22aabe7f9b4d1..56f6d5d81a7735d33ea7cc777945024850dfe177 100644 (file)
@@ -49,6 +49,7 @@ enum {
        IP_VS_RT_MODE_RDR       = 4, /* Allow redirect from remote daddr to
                                      * local
                                      */
+       IP_VS_RT_MODE_CONNECT   = 8, /* Always bind route to saddr */
 };
 
 /*
@@ -84,6 +85,58 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
        return dst;
 }
 
+static inline bool
+__mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
+{
+       if (IP6CB(skb)->frag_max_size) {
+               /* frag_max_size tell us that, this packet have been
+                * defragmented by netfilter IPv6 conntrack module.
+                */
+               if (IP6CB(skb)->frag_max_size > mtu)
+                       return true; /* largest fragment violate MTU */
+       }
+       else if (skb->len > mtu && !skb_is_gso(skb)) {
+               return true; /* Packet size violate MTU size */
+       }
+       return false;
+}
+
+/* Get route to daddr, update *saddr, optionally bind route to saddr */
+static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+                                      u32 rtos, int rt_mode, __be32 *saddr)
+{
+       struct flowi4 fl4;
+       struct rtable *rt;
+       int loop = 0;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.daddr = daddr;
+       fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+       fl4.flowi4_tos = rtos;
+
+retry:
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt)) {
+               /* Invalid saddr ? */
+               if (PTR_ERR(rt) == -EINVAL && *saddr &&
+                   rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+                       *saddr = 0;
+                       flowi4_update_output(&fl4, 0, rtos, daddr, 0);
+                       goto retry;
+               }
+               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+               return NULL;
+       } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+               ip_rt_put(rt);
+               *saddr = fl4.saddr;
+               flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr);
+               loop++;
+               goto retry;
+       }
+       *saddr = fl4.saddr;
+       return rt;
+}
+
 /* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -98,20 +151,13 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                spin_lock(&dest->dst_lock);
                if (!(rt = (struct rtable *)
                      __ip_vs_dst_check(dest, rtos))) {
-                       struct flowi4 fl4;
-
-                       memset(&fl4, 0, sizeof(fl4));
-                       fl4.daddr = dest->addr.ip;
-                       fl4.flowi4_tos = rtos;
-                       rt = ip_route_output_key(net, &fl4);
-                       if (IS_ERR(rt)) {
+                       rt = do_output_route4(net, dest->addr.ip, rtos,
+                                             rt_mode, &dest->dst_saddr.ip);
+                       if (!rt) {
                                spin_unlock(&dest->dst_lock);
-                               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-                                            &dest->addr.ip);
                                return NULL;
                        }
                        __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
-                       dest->dst_saddr.ip = fl4.saddr;
                        IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
                                  "rtos=%X\n",
                                  &dest->addr.ip, &dest->dst_saddr.ip,
@@ -122,19 +168,17 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                        *ret_saddr = dest->dst_saddr.ip;
                spin_unlock(&dest->dst_lock);
        } else {
-               struct flowi4 fl4;
+               __be32 saddr = htonl(INADDR_ANY);
 
-               memset(&fl4, 0, sizeof(fl4));
-               fl4.daddr = daddr;
-               fl4.flowi4_tos = rtos;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt)) {
-                       IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-                                    &daddr);
+               /* For such unconfigured boxes avoid many route lookups
+                * for performance reasons because we do not remember saddr
+                */
+               rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+               rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr);
+               if (!rt)
                        return NULL;
-               }
                if (ret_saddr)
-                       *ret_saddr = fl4.saddr;
+                       *ret_saddr = saddr;
        }
 
        local = rt->rt_flags & RTCF_LOCAL;
@@ -331,6 +375,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
        old_dst = dest->dst_cache;
        dest->dst_cache = NULL;
        dst_release(old_dst);
+       dest->dst_saddr.ip = 0;
 }
 
 #define IP_VS_XMIT_TUNNEL(skb, cp)                             \
@@ -462,7 +507,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -683,7 +728,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -766,12 +811,13 @@ int
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
        struct rtable *rt;                      /* Route to the other host */
        __be32 saddr;                           /* Source for tunnel */
        struct net_device *tdev;                /* Device to other host */
        struct iphdr  *old_iph = ip_hdr(skb);
        u8     tos = old_iph->tos;
-       __be16 df = old_iph->frag_off;
+       __be16 df;
        struct iphdr  *iph;                     /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        int    mtu;
@@ -781,7 +827,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
                                      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
-                                                  IP_VS_RT_MODE_NON_LOCAL,
+                                                  IP_VS_RT_MODE_NON_LOCAL |
+                                                  IP_VS_RT_MODE_CONNECT,
                                                   &saddr)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
@@ -796,13 +843,13 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
                goto tx_error_put;
        }
-       if (skb_dst(skb))
+       if (rt_is_output_route(skb_rtable(skb)))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-       df |= (old_iph->frag_off & htons(IP_DF));
+       /* Copy DF, reset fragment offset and MF */
+       df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
 
-       if ((old_iph->frag_off & htons(IP_DF) &&
-           mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
+       if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
@@ -915,8 +962,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-       if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
-           !skb_is_gso(skb)) {
+       /* MTU checking: Notice that 'mtu' have been adjusted before hand */
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -1082,7 +1129,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -1318,7 +1365,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
index f2de8c55ac506fcb764af21968ed2fc70a8a9da4..c514fe6033d254280f6760a3fe6d1c1f37185e1c 100644 (file)
@@ -40,6 +40,7 @@ MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)");
 
 unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
                                   enum ip_conntrack_info ctinfo,
+                                  unsigned int protoff,
                                   unsigned int matchoff,
                                   unsigned int matchlen,
                                   struct nf_conntrack_expect *exp)
@@ -155,8 +156,8 @@ static int amanda_help(struct sk_buff *skb,
 
                nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
                if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
-                       ret = nf_nat_amanda(skb, ctinfo, off - dataoff,
-                                           len, exp);
+                       ret = nf_nat_amanda(skb, ctinfo, protoff,
+                                           off - dataoff, len, exp);
                else if (nf_ct_expect_related(exp) != 0)
                        ret = NF_DROP;
                nf_ct_expect_put(exp);
index 2ceec64b19f9866a222787531ba948b4c9e7e75b..0f241be28f9eec697ea9216c096657ace6eaefd6 100644 (file)
@@ -55,6 +55,12 @@ int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      const struct nlattr *attr) __read_mostly;
 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
 
+int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
+                             struct nf_conn *ct,
+                             enum ip_conntrack_info ctinfo,
+                             unsigned int protoff);
+EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
+
 DEFINE_SPINLOCK(nf_conntrack_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
@@ -930,7 +936,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
-       struct nf_conn_timeout *timeout_ext;
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
@@ -997,11 +1002,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        NF_CT_ASSERT(skb->nfct);
 
        /* Decide what timeout policy we want to apply to this flow. */
-       timeout_ext = nf_ct_timeout_find(ct);
-       if (timeout_ext)
-               timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
-       else
-               timeouts = l4proto->get_timeouts(net);
+       timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
 
        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
        if (ret <= 0) {
@@ -1223,6 +1224,8 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+                               continue;
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
index e7be79e640de0397ac9f5e5aaab00faf1f2c8730..de9781b6464f0940d391555489782bf63f1c956e 100644 (file)
@@ -61,7 +61,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
                goto out_unlock;
 
        item.ct = ct;
-       item.pid = 0;
+       item.portid = 0;
        item.report = 0;
 
        ret = notify->fcn(events | missed, &item);
index 4bb771d1f57af53545b9eb36687ba000fd535fde..1ce3befb7c8ac895360a286d7ec9c6cd0da4a067 100644 (file)
@@ -48,6 +48,7 @@ module_param(loose, bool, 0600);
 unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
                                enum ip_conntrack_info ctinfo,
                                enum nf_ct_ftp_type type,
+                               unsigned int protoff,
                                unsigned int matchoff,
                                unsigned int matchlen,
                                struct nf_conntrack_expect *exp);
@@ -395,6 +396,12 @@ static int help(struct sk_buff *skb,
 
        /* Look up to see if we're just after a \n. */
        if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
+               /* We're picking up this, clear flags and let it continue */
+               if (unlikely(ct_ftp_info->flags[dir] & NF_CT_FTP_SEQ_PICKUP)) {
+                       ct_ftp_info->flags[dir] ^= NF_CT_FTP_SEQ_PICKUP;
+                       goto skip_nl_seq;
+               }
+
                /* Now if this ends in \n, update ftp info. */
                pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
                         ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
@@ -405,6 +412,7 @@ static int help(struct sk_buff *skb,
                goto out_update_nl;
        }
 
+skip_nl_seq:
        /* Initialize IP/IPv6 addr to expected address (it's not mentioned
           in EPSV responses) */
        cmd.l3num = nf_ct_l3num(ct);
@@ -489,7 +497,7 @@ static int help(struct sk_buff *skb,
        nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
        if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
                ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
-                                matchoff, matchlen, exp);
+                                protoff, matchoff, matchlen, exp);
        else {
                /* Can't expect this?  Best to drop packet now. */
                if (nf_ct_expect_related(exp) != 0)
@@ -511,6 +519,19 @@ out_update_nl:
        return ret;
 }
 
+static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+{
+       struct nf_ct_ftp_master *ftp = nfct_help_data(ct);
+
+       /* This conntrack has been injected from user-space, always pick up
+        * sequence tracking. Otherwise, the first FTP command after the
+        * failover breaks.
+        */
+       ftp->flags[IP_CT_DIR_ORIGINAL] |= NF_CT_FTP_SEQ_PICKUP;
+       ftp->flags[IP_CT_DIR_REPLY] |= NF_CT_FTP_SEQ_PICKUP;
+       return 0;
+}
+
 static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
 
 static const struct nf_conntrack_expect_policy ftp_exp_policy = {
@@ -560,6 +581,7 @@ static int __init nf_conntrack_ftp_init(void)
                        ftp[i][j].expect_policy = &ftp_exp_policy;
                        ftp[i][j].me = THIS_MODULE;
                        ftp[i][j].help = help;
+                       ftp[i][j].from_nlattr = nf_ct_ftp_from_nlattr;
                        if (ports[i] == FTP_PORT)
                                sprintf(ftp[i][j].name, "ftp");
                        else
index 4283b207e63be6c5da6fb9db24696d59097157fb..1b30b0dee70818c4842b1835964ffc5f59e6b6e3 100644 (file)
@@ -49,12 +49,12 @@ MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
                                     "(determined by routing information)");
 
 /* Hooks for NAT */
-int (*set_h245_addr_hook) (struct sk_buff *skb,
+int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                           unsigned char **data, int dataoff,
                           H245_TransportAddress *taddr,
                           union nf_inet_addr *addr, __be16 port)
                           __read_mostly;
-int (*set_h225_addr_hook) (struct sk_buff *skb,
+int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                           unsigned char **data, int dataoff,
                           TransportAddress *taddr,
                           union nf_inet_addr *addr, __be16 port)
@@ -62,16 +62,17 @@ int (*set_h225_addr_hook) (struct sk_buff *skb,
 int (*set_sig_addr_hook) (struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
-                         unsigned char **data,
+                         unsigned int protoff, unsigned char **data,
                          TransportAddress *taddr, int count) __read_mostly;
 int (*set_ras_addr_hook) (struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
-                         unsigned char **data,
+                         unsigned int protoff, unsigned char **data,
                          TransportAddress *taddr, int count) __read_mostly;
 int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
+                         unsigned int protoff,
                          unsigned char **data, int dataoff,
                          H245_TransportAddress *taddr,
                          __be16 port, __be16 rtp_port,
@@ -80,24 +81,28 @@ int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
 int (*nat_t120_hook) (struct sk_buff *skb,
                      struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo,
+                     unsigned int protoff,
                      unsigned char **data, int dataoff,
                      H245_TransportAddress *taddr, __be16 port,
                      struct nf_conntrack_expect *exp) __read_mostly;
 int (*nat_h245_hook) (struct sk_buff *skb,
                      struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo,
+                     unsigned int protoff,
                      unsigned char **data, int dataoff,
                      TransportAddress *taddr, __be16 port,
                      struct nf_conntrack_expect *exp) __read_mostly;
 int (*nat_callforwarding_hook) (struct sk_buff *skb,
                                struct nf_conn *ct,
                                enum ip_conntrack_info ctinfo,
+                               unsigned int protoff,
                                unsigned char **data, int dataoff,
                                TransportAddress *taddr, __be16 port,
                                struct nf_conntrack_expect *exp) __read_mostly;
 int (*nat_q931_hook) (struct sk_buff *skb,
                      struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo,
+                     unsigned int protoff,
                      unsigned char **data, TransportAddress *taddr, int idx,
                      __be16 port, struct nf_conntrack_expect *exp)
                      __read_mostly;
@@ -251,6 +256,7 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
 /****************************************************************************/
 static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
+                          unsigned int protoff,
                           unsigned char **data, int dataoff,
                           H245_TransportAddress *taddr)
 {
@@ -295,9 +301,10 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
                   (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
+                  nf_ct_l3num(ct) == NFPROTO_IPV4 &&
                   ct->status & IPS_NAT_MASK) {
                /* NAT needed */
-               ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+               ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                   taddr, port, rtp_port, rtp_exp, rtcp_exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -324,6 +331,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 static int expect_t120(struct sk_buff *skb,
                       struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, int dataoff,
                       H245_TransportAddress *taddr)
 {
@@ -353,9 +361,10 @@ static int expect_t120(struct sk_buff *skb,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
            (nat_t120 = rcu_dereference(nat_t120_hook)) &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            ct->status & IPS_NAT_MASK) {
                /* NAT needed */
-               ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr,
+               ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
                               port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
@@ -374,6 +383,7 @@ static int expect_t120(struct sk_buff *skb,
 static int process_h245_channel(struct sk_buff *skb,
                                struct nf_conn *ct,
                                enum ip_conntrack_info ctinfo,
+                               unsigned int protoff,
                                unsigned char **data, int dataoff,
                                H2250LogicalChannelParameters *channel)
 {
@@ -381,7 +391,7 @@ static int process_h245_channel(struct sk_buff *skb,
 
        if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
                /* RTP */
-               ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+               ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                      &channel->mediaChannel);
                if (ret < 0)
                        return -1;
@@ -390,7 +400,7 @@ static int process_h245_channel(struct sk_buff *skb,
        if (channel->
            options & eH2250LogicalChannelParameters_mediaControlChannel) {
                /* RTCP */
-               ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+               ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                      &channel->mediaControlChannel);
                if (ret < 0)
                        return -1;
@@ -402,6 +412,7 @@ static int process_h245_channel(struct sk_buff *skb,
 /****************************************************************************/
 static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, int dataoff,
                       OpenLogicalChannel *olc)
 {
@@ -412,7 +423,8 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
        if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
            eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
        {
-               ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
+               ret = process_h245_channel(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &olc->
                                           forwardLogicalChannelParameters.
                                           multiplexParameters.
@@ -430,7 +442,8 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
                eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
        {
                ret =
-                   process_h245_channel(skb, ct, ctinfo, data, dataoff,
+                   process_h245_channel(skb, ct, ctinfo,
+                                        protoff, data, dataoff,
                                         &olc->
                                         reverseLogicalChannelParameters.
                                         multiplexParameters.
@@ -448,7 +461,7 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
            t120.choice == eDataProtocolCapability_separateLANStack &&
            olc->separateStack.networkAddress.choice ==
            eNetworkAccessParameters_networkAddress_localAreaAddress) {
-               ret = expect_t120(skb, ct, ctinfo, data, dataoff,
+               ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
                                  &olc->separateStack.networkAddress.
                                  localAreaAddress);
                if (ret < 0)
@@ -461,7 +474,7 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
+                       unsigned int protoff, unsigned char **data, int dataoff,
                        OpenLogicalChannelAck *olca)
 {
        H2250LogicalChannelAckParameters *ack;
@@ -477,7 +490,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                choice ==
                eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
        {
-               ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
+               ret = process_h245_channel(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &olca->
                                           reverseLogicalChannelParameters.
                                           multiplexParameters.
@@ -496,7 +510,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                if (ack->options &
                    eH2250LogicalChannelAckParameters_mediaChannel) {
                        /* RTP */
-                       ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+                       ret = expect_rtp_rtcp(skb, ct, ctinfo,
+                                             protoff, data, dataoff,
                                              &ack->mediaChannel);
                        if (ret < 0)
                                return -1;
@@ -505,7 +520,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                if (ack->options &
                    eH2250LogicalChannelAckParameters_mediaControlChannel) {
                        /* RTCP */
-                       ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+                       ret = expect_rtp_rtcp(skb, ct, ctinfo,
+                                             protoff, data, dataoff,
                                              &ack->mediaControlChannel);
                        if (ret < 0)
                                return -1;
@@ -515,7 +531,7 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
        if ((olca->options & eOpenLogicalChannelAck_separateStack) &&
                olca->separateStack.networkAddress.choice ==
                eNetworkAccessParameters_networkAddress_localAreaAddress) {
-               ret = expect_t120(skb, ct, ctinfo, data, dataoff,
+               ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
                                  &olca->separateStack.networkAddress.
                                  localAreaAddress);
                if (ret < 0)
@@ -528,14 +544,15 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
+                       unsigned int protoff, unsigned char **data, int dataoff,
                        MultimediaSystemControlMessage *mscm)
 {
        switch (mscm->choice) {
        case eMultimediaSystemControlMessage_request:
                if (mscm->request.choice ==
                    eRequestMessage_openLogicalChannel) {
-                       return process_olc(skb, ct, ctinfo, data, dataoff,
+                       return process_olc(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &mscm->request.openLogicalChannel);
                }
                pr_debug("nf_ct_h323: H.245 Request %d\n",
@@ -544,7 +561,8 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
        case eMultimediaSystemControlMessage_response:
                if (mscm->response.choice ==
                    eResponseMessage_openLogicalChannelAck) {
-                       return process_olca(skb, ct, ctinfo, data, dataoff,
+                       return process_olca(skb, ct, ctinfo,
+                                           protoff, data, dataoff,
                                            &mscm->response.
                                            openLogicalChannelAck);
                }
@@ -595,7 +613,8 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
                }
 
                /* Process H.245 signal */
-               if (process_h245(skb, ct, ctinfo, &data, dataoff, &mscm) < 0)
+               if (process_h245(skb, ct, ctinfo, protoff,
+                                &data, dataoff, &mscm) < 0)
                        goto drop;
        }
 
@@ -659,7 +678,7 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 /****************************************************************************/
 static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
-                      unsigned char **data, int dataoff,
+                      unsigned int protoff, unsigned char **data, int dataoff,
                       TransportAddress *taddr)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -688,9 +707,10 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
            (nat_h245 = rcu_dereference(nat_h245_hook)) &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            ct->status & IPS_NAT_MASK) {
                /* NAT needed */
-               ret = nat_h245(skb, ct, ctinfo, data, dataoff, taddr,
+               ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
                               port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
@@ -776,6 +796,7 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 static int expect_callforwarding(struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
+                                unsigned int protoff,
                                 unsigned char **data, int dataoff,
                                 TransportAddress *taddr)
 {
@@ -811,9 +832,11 @@ static int expect_callforwarding(struct sk_buff *skb,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
            (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            ct->status & IPS_NAT_MASK) {
                /* Need NAT */
-               ret = nat_callforwarding(skb, ct, ctinfo, data, dataoff,
+               ret = nat_callforwarding(skb, ct, ctinfo,
+                                        protoff, data, dataoff,
                                         taddr, port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
@@ -831,6 +854,7 @@ static int expect_callforwarding(struct sk_buff *skb,
 /****************************************************************************/
 static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
                         enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
                         unsigned char **data, int dataoff,
                         Setup_UUIE *setup)
 {
@@ -844,7 +868,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Setup\n");
 
        if (setup->options & eSetup_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &setup->h245Address);
                if (ret < 0)
                        return -1;
@@ -852,14 +876,15 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 
        set_h225_addr = rcu_dereference(set_h225_addr_hook);
        if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
-           (set_h225_addr) && ct->status & IPS_NAT_MASK &&
+           (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK &&
            get_h225_addr(ct, *data, &setup->destCallSignalAddress,
                          &addr, &port) &&
            memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
                pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
                         &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
                         ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
-               ret = set_h225_addr(skb, data, dataoff,
+               ret = set_h225_addr(skb, protoff, data, dataoff,
                                    &setup->destCallSignalAddress,
                                    &ct->tuplehash[!dir].tuple.src.u3,
                                    ct->tuplehash[!dir].tuple.src.u.tcp.port);
@@ -868,14 +893,15 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
-           (set_h225_addr) && ct->status & IPS_NAT_MASK &&
+           (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK &&
            get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
                          &addr, &port) &&
            memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
                pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
                         &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
                         ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
-               ret = set_h225_addr(skb, data, dataoff,
+               ret = set_h225_addr(skb, protoff, data, dataoff,
                                    &setup->sourceCallSignalAddress,
                                    &ct->tuplehash[!dir].tuple.dst.u3,
                                    ct->tuplehash[!dir].tuple.dst.u.tcp.port);
@@ -885,7 +911,8 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 
        if (setup->options & eSetup_UUIE_fastStart) {
                for (i = 0; i < setup->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &setup->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -899,6 +926,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 static int process_callproceeding(struct sk_buff *skb,
                                  struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
+                                 unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  CallProceeding_UUIE *callproc)
 {
@@ -908,7 +936,7 @@ static int process_callproceeding(struct sk_buff *skb,
        pr_debug("nf_ct_q931: CallProceeding\n");
 
        if (callproc->options & eCallProceeding_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &callproc->h245Address);
                if (ret < 0)
                        return -1;
@@ -916,7 +944,8 @@ static int process_callproceeding(struct sk_buff *skb,
 
        if (callproc->options & eCallProceeding_UUIE_fastStart) {
                for (i = 0; i < callproc->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &callproc->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -929,6 +958,7 @@ static int process_callproceeding(struct sk_buff *skb,
 /****************************************************************************/
 static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
+                          unsigned int protoff,
                           unsigned char **data, int dataoff,
                           Connect_UUIE *connect)
 {
@@ -938,7 +968,7 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Connect\n");
 
        if (connect->options & eConnect_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &connect->h245Address);
                if (ret < 0)
                        return -1;
@@ -946,7 +976,8 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 
        if (connect->options & eConnect_UUIE_fastStart) {
                for (i = 0; i < connect->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &connect->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -959,6 +990,7 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            unsigned char **data, int dataoff,
                            Alerting_UUIE *alert)
 {
@@ -968,7 +1000,7 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Alerting\n");
 
        if (alert->options & eAlerting_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &alert->h245Address);
                if (ret < 0)
                        return -1;
@@ -976,7 +1008,8 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 
        if (alert->options & eAlerting_UUIE_fastStart) {
                for (i = 0; i < alert->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &alert->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -989,6 +1022,7 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            unsigned char **data, int dataoff,
                            Facility_UUIE *facility)
 {
@@ -999,15 +1033,15 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 
        if (facility->reason.choice == eFacilityReason_callForwarded) {
                if (facility->options & eFacility_UUIE_alternativeAddress)
-                       return expect_callforwarding(skb, ct, ctinfo, data,
-                                                    dataoff,
+                       return expect_callforwarding(skb, ct, ctinfo,
+                                                    protoff, data, dataoff,
                                                     &facility->
                                                     alternativeAddress);
                return 0;
        }
 
        if (facility->options & eFacility_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &facility->h245Address);
                if (ret < 0)
                        return -1;
@@ -1015,7 +1049,8 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 
        if (facility->options & eFacility_UUIE_fastStart) {
                for (i = 0; i < facility->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &facility->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -1028,6 +1063,7 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            unsigned char **data, int dataoff,
                            Progress_UUIE *progress)
 {
@@ -1037,7 +1073,7 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Progress\n");
 
        if (progress->options & eProgress_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &progress->h245Address);
                if (ret < 0)
                        return -1;
@@ -1045,7 +1081,8 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 
        if (progress->options & eProgress_UUIE_fastStart) {
                for (i = 0; i < progress->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &progress->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -1058,7 +1095,8 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff, Q931 *q931)
+                       unsigned int protoff, unsigned char **data, int dataoff,
+                       Q931 *q931)
 {
        H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
        int i;
@@ -1066,28 +1104,29 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 
        switch (pdu->h323_message_body.choice) {
        case eH323_UU_PDU_h323_message_body_setup:
-               ret = process_setup(skb, ct, ctinfo, data, dataoff,
+               ret = process_setup(skb, ct, ctinfo, protoff, data, dataoff,
                                    &pdu->h323_message_body.setup);
                break;
        case eH323_UU_PDU_h323_message_body_callProceeding:
-               ret = process_callproceeding(skb, ct, ctinfo, data, dataoff,
+               ret = process_callproceeding(skb, ct, ctinfo,
+                                            protoff, data, dataoff,
                                             &pdu->h323_message_body.
                                             callProceeding);
                break;
        case eH323_UU_PDU_h323_message_body_connect:
-               ret = process_connect(skb, ct, ctinfo, data, dataoff,
+               ret = process_connect(skb, ct, ctinfo, protoff, data, dataoff,
                                      &pdu->h323_message_body.connect);
                break;
        case eH323_UU_PDU_h323_message_body_alerting:
-               ret = process_alerting(skb, ct, ctinfo, data, dataoff,
+               ret = process_alerting(skb, ct, ctinfo, protoff, data, dataoff,
                                       &pdu->h323_message_body.alerting);
                break;
        case eH323_UU_PDU_h323_message_body_facility:
-               ret = process_facility(skb, ct, ctinfo, data, dataoff,
+               ret = process_facility(skb, ct, ctinfo, protoff, data, dataoff,
                                       &pdu->h323_message_body.facility);
                break;
        case eH323_UU_PDU_h323_message_body_progress:
-               ret = process_progress(skb, ct, ctinfo, data, dataoff,
+               ret = process_progress(skb, ct, ctinfo, protoff, data, dataoff,
                                       &pdu->h323_message_body.progress);
                break;
        default:
@@ -1101,7 +1140,8 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 
        if (pdu->options & eH323_UU_PDU_h245Control) {
                for (i = 0; i < pdu->h245Control.count; i++) {
-                       ret = process_h245(skb, ct, ctinfo, data, dataoff,
+                       ret = process_h245(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &pdu->h245Control.item[i]);
                        if (ret < 0)
                                return -1;
@@ -1146,7 +1186,8 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
                }
 
                /* Process Q.931 signal */
-               if (process_q931(skb, ct, ctinfo, &data, dataoff, &q931) < 0)
+               if (process_q931(skb, ct, ctinfo, protoff,
+                                &data, dataoff, &q931) < 0)
                        goto drop;
        }
 
@@ -1243,7 +1284,7 @@ static int set_expect_timeout(struct nf_conntrack_expect *exp,
 /****************************************************************************/
 static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
-                      unsigned char **data,
+                      unsigned int protoff, unsigned char **data,
                       TransportAddress *taddr, int count)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1278,8 +1319,10 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
        exp->flags = NF_CT_EXPECT_PERMANENT;    /* Accept multiple calls */
 
        nat_q931 = rcu_dereference(nat_q931_hook);
-       if (nat_q931 && ct->status & IPS_NAT_MASK) {    /* Need NAT */
-               ret = nat_q931(skb, ct, ctinfo, data, taddr, i, port, exp);
+       if (nat_q931 && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {        /* Need NAT */
+               ret = nat_q931(skb, ct, ctinfo, protoff, data,
+                              taddr, i, port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
                        pr_debug("nf_ct_ras: expect Q.931 ");
@@ -1299,6 +1342,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, GatekeeperRequest *grq)
 {
        typeof(set_ras_addr_hook) set_ras_addr;
@@ -1306,8 +1350,9 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: GRQ\n");
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK)  /* NATed */
-               return set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK)  /* NATed */
+               return set_ras_addr(skb, ct, ctinfo, protoff, data,
                                    &grq->rasAddress, 1);
        return 0;
 }
@@ -1315,6 +1360,7 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, GatekeeperConfirm *gcf)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -1359,6 +1405,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, RegistrationRequest *rrq)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1367,15 +1414,16 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 
        pr_debug("nf_ct_ras: RRQ\n");
 
-       ret = expect_q931(skb, ct, ctinfo, data,
+       ret = expect_q931(skb, ct, ctinfo, protoff, data,
                          rrq->callSignalAddress.item,
                          rrq->callSignalAddress.count);
        if (ret < 0)
                return -1;
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
                                   rrq->rasAddress.item,
                                   rrq->rasAddress.count);
                if (ret < 0)
@@ -1394,6 +1442,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, RegistrationConfirm *rcf)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1405,8 +1454,9 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: RCF\n");
 
        set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_sig_addr(skb, ct, ctinfo, data,
+       if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
                                        rcf->callSignalAddress.item,
                                        rcf->callSignalAddress.count);
                if (ret < 0)
@@ -1443,6 +1493,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, UnregistrationRequest *urq)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1453,8 +1504,9 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: URQ\n");
 
        set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_sig_addr(skb, ct, ctinfo, data,
+       if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
                                   urq->callSignalAddress.item,
                                   urq->callSignalAddress.count);
                if (ret < 0)
@@ -1475,6 +1527,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, AdmissionRequest *arq)
 {
        const struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1491,9 +1544,10 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
                          &addr, &port) &&
            !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
            port == info->sig_port[dir] &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            set_h225_addr && ct->status & IPS_NAT_MASK) {
                /* Answering ARQ */
-               return set_h225_addr(skb, data, 0,
+               return set_h225_addr(skb, protoff, data, 0,
                                     &arq->destCallSignalAddress,
                                     &ct->tuplehash[!dir].tuple.dst.u3,
                                     info->sig_port[!dir]);
@@ -1503,9 +1557,10 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
            get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
                          &addr, &port) &&
            !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
-           set_h225_addr && ct->status & IPS_NAT_MASK) {
+           set_h225_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
                /* Calling ARQ */
-               return set_h225_addr(skb, data, 0,
+               return set_h225_addr(skb, protoff, data, 0,
                                     &arq->srcCallSignalAddress,
                                     &ct->tuplehash[!dir].tuple.dst.u3,
                                     port);
@@ -1517,6 +1572,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, AdmissionConfirm *acf)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -1535,8 +1591,9 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
        if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
                /* Answering ACF */
                set_sig_addr = rcu_dereference(set_sig_addr_hook);
-               if (set_sig_addr && ct->status & IPS_NAT_MASK)
-                       return set_sig_addr(skb, ct, ctinfo, data,
+               if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+                   ct->status & IPS_NAT_MASK)
+                       return set_sig_addr(skb, ct, ctinfo, protoff, data,
                                            &acf->destCallSignalAddress, 1);
                return 0;
        }
@@ -1564,6 +1621,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, LocationRequest *lrq)
 {
        typeof(set_ras_addr_hook) set_ras_addr;
@@ -1571,8 +1629,9 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: LRQ\n");
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK)
-               return set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK)
+               return set_ras_addr(skb, ct, ctinfo, protoff, data,
                                    &lrq->replyAddress, 1);
        return 0;
 }
@@ -1580,6 +1639,7 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, LocationConfirm *lcf)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -1619,6 +1679,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, InfoRequestResponse *irr)
 {
        int ret;
@@ -1628,16 +1689,18 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: IRR\n");
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
                                   &irr->rasAddress, 1);
                if (ret < 0)
                        return -1;
        }
 
        set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_sig_addr(skb, ct, ctinfo, data,
+       if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
                                        irr->callSignalAddress.item,
                                        irr->callSignalAddress.count);
                if (ret < 0)
@@ -1650,38 +1713,39 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, RasMessage *ras)
 {
        switch (ras->choice) {
        case eRasMessage_gatekeeperRequest:
-               return process_grq(skb, ct, ctinfo, data,
+               return process_grq(skb, ct, ctinfo, protoff, data,
                                   &ras->gatekeeperRequest);
        case eRasMessage_gatekeeperConfirm:
-               return process_gcf(skb, ct, ctinfo, data,
+               return process_gcf(skb, ct, ctinfo, protoff, data,
                                   &ras->gatekeeperConfirm);
        case eRasMessage_registrationRequest:
-               return process_rrq(skb, ct, ctinfo, data,
+               return process_rrq(skb, ct, ctinfo, protoff, data,
                                   &ras->registrationRequest);
        case eRasMessage_registrationConfirm:
-               return process_rcf(skb, ct, ctinfo, data,
+               return process_rcf(skb, ct, ctinfo, protoff, data,
                                   &ras->registrationConfirm);
        case eRasMessage_unregistrationRequest:
-               return process_urq(skb, ct, ctinfo, data,
+               return process_urq(skb, ct, ctinfo, protoff, data,
                                   &ras->unregistrationRequest);
        case eRasMessage_admissionRequest:
-               return process_arq(skb, ct, ctinfo, data,
+               return process_arq(skb, ct, ctinfo, protoff, data,
                                   &ras->admissionRequest);
        case eRasMessage_admissionConfirm:
-               return process_acf(skb, ct, ctinfo, data,
+               return process_acf(skb, ct, ctinfo, protoff, data,
                                   &ras->admissionConfirm);
        case eRasMessage_locationRequest:
-               return process_lrq(skb, ct, ctinfo, data,
+               return process_lrq(skb, ct, ctinfo, protoff, data,
                                   &ras->locationRequest);
        case eRasMessage_locationConfirm:
-               return process_lcf(skb, ct, ctinfo, data,
+               return process_lcf(skb, ct, ctinfo, protoff, data,
                                   &ras->locationConfirm);
        case eRasMessage_infoRequestResponse:
-               return process_irr(skb, ct, ctinfo, data,
+               return process_irr(skb, ct, ctinfo, protoff, data,
                                   &ras->infoRequestResponse);
        default:
                pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
@@ -1721,7 +1785,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
        }
 
        /* Process RAS message */
-       if (process_ras(skb, ct, ctinfo, &data, &ras) < 0)
+       if (process_ras(skb, ct, ctinfo, protoff, &data, &ras) < 0)
                goto drop;
 
       accept:
index 009c52cfd1ec4b9b86a8dd7bf047f3eae249fd8a..3b20aa77cfc8b76ef1ed78b0b85aacd926f3ce29 100644 (file)
@@ -33,6 +33,7 @@ static DEFINE_SPINLOCK(irc_buffer_lock);
 
 unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
                                enum ip_conntrack_info ctinfo,
+                               unsigned int protoff,
                                unsigned int matchoff,
                                unsigned int matchlen,
                                struct nf_conntrack_expect *exp) __read_mostly;
@@ -205,7 +206,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 
                        nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
                        if (nf_nat_irc && ct->status & IPS_NAT_MASK)
-                               ret = nf_nat_irc(skb, ctinfo,
+                               ret = nf_nat_irc(skb, ctinfo, protoff,
                                                 addr_beg_p - ib_ptr,
                                                 addr_end_p - addr_beg_p,
                                                 exp);
index 9807f3278fcbcdfcc28c61b9b19e6a8c74d02b18..7bbfb3deea305e4d8434c3af9a68ee6e4531c80f 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <net/netfilter/nf_nat_helper.h>
 #endif
 
@@ -418,16 +418,16 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                    struct nf_conn *ct)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nlattr *nest_parms;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -604,7 +604,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
                goto errout;
 
        type |= NFNL_SUBSYS_CTNETLINK << 8;
-       nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -680,7 +680,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
        rcu_read_unlock();
 
        nlmsg_end(skb, nlh);
-       err = nfnetlink_send(skb, net, item->pid, group, item->report,
+       err = nfnetlink_send(skb, net, item->portid, group, item->report,
                             GFP_ATOMIC);
        if (err == -ENOBUFS || err == -EAGAIN)
                return -ENOBUFS;
@@ -757,7 +757,7 @@ restart:
 #endif
                        rcu_read_lock();
                        res =
-                       ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
+                       ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                            ct);
@@ -961,7 +961,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        else {
                /* Flush the whole table */
                nf_conntrack_flush_report(net,
-                                        NETLINK_CB(skb).pid,
+                                        NETLINK_CB(skb).portid,
                                         nlmsg_report(nlh));
                return 0;
        }
@@ -985,7 +985,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        if (del_timer(&ct->timeout)) {
                if (nf_conntrack_event_report(IPCT_DESTROY, ct,
-                                             NETLINK_CB(skb).pid,
+                                             NETLINK_CB(skb).portid,
                                              nlmsg_report(nlh)) < 0) {
                        nf_ct_delete_from_lists(ct);
                        /* we failed to report the event, try later */
@@ -1069,14 +1069,14 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        }
 
        rcu_read_lock();
-       err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
+       err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                                  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
        rcu_read_unlock();
        nf_ct_put(ct);
        if (err <= 0)
                goto free;
 
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (err < 0)
                goto out;
 
@@ -1096,13 +1096,14 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
                          const struct nlattr *attr)
 {
        typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
+       int err;
 
        parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
        if (!parse_nat_setup) {
 #ifdef CONFIG_MODULES
                rcu_read_unlock();
                nfnl_unlock();
-               if (request_module("nf-nat-ipv4") < 0) {
+               if (request_module("nf-nat") < 0) {
                        nfnl_lock();
                        rcu_read_lock();
                        return -EOPNOTSUPP;
@@ -1115,7 +1116,23 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
                return -EOPNOTSUPP;
        }
 
-       return parse_nat_setup(ct, manip, attr);
+       err = parse_nat_setup(ct, manip, attr);
+       if (err == -EAGAIN) {
+#ifdef CONFIG_MODULES
+               rcu_read_unlock();
+               nfnl_unlock();
+               if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
+                       nfnl_lock();
+                       rcu_read_lock();
+                       return -EOPNOTSUPP;
+               }
+               nfnl_lock();
+               rcu_read_lock();
+#else
+               err = -EOPNOTSUPP;
+#endif
+       }
+       return err;
 }
 #endif
 
@@ -1221,7 +1238,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
        if (help) {
                if (help->helper == helper) {
                        /* update private helper data if allowed. */
-                       if (helper->from_nlattr && helpinfo)
+                       if (helper->from_nlattr)
                                helper->from_nlattr(helpinfo, ct);
                        return 0;
                } else
@@ -1450,7 +1467,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                                goto err2;
                        }
                        /* set private helper data if allowed. */
-                       if (helper->from_nlattr && helpinfo)
+                       if (helper->from_nlattr)
                                helper->from_nlattr(helpinfo, ct);
 
                        /* not in hash table yet so not strictly necessary */
@@ -1596,7 +1613,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                                                      (1 << IPCT_PROTOINFO) |
                                                      (1 << IPCT_NATSEQADJ) |
                                                      (1 << IPCT_MARK) | events,
-                                                     ct, NETLINK_CB(skb).pid,
+                                                     ct, NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                        nf_ct_put(ct);
                }
@@ -1618,7 +1635,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                                                      (1 << IPCT_PROTOINFO) |
                                                      (1 << IPCT_NATSEQADJ) |
                                                      (1 << IPCT_MARK),
-                                                     ct, NETLINK_CB(skb).pid,
+                                                     ct, NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                }
        }
@@ -1628,15 +1645,15 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 }
 
 static int
-ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                                __u16 cpu, const struct ip_conntrack_stat *st)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -1688,7 +1705,7 @@ ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
                st = per_cpu_ptr(net->ct.stat, cpu);
                if (ctnetlink_ct_stat_cpu_fill_info(skb,
-                                                   NETLINK_CB(cb->skb).pid,
+                                                   NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    cpu, st) < 0)
                                break;
@@ -1714,16 +1731,16 @@ ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
 }
 
 static int
-ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                            struct net *net)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
        unsigned int nr_conntracks = atomic_read(&net->ct.count);
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -1756,14 +1773,14 @@ ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
+       err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
                                          nlh->nlmsg_seq,
                                          NFNL_MSG_TYPE(nlh->nlmsg_type),
                                          sock_net(skb->sk));
        if (err <= 0)
                goto free;
 
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (err < 0)
                goto out;
 
@@ -1979,6 +1996,8 @@ nla_put_failure:
        return -1;
 }
 
+static const union nf_inet_addr any_addr;
+
 static int
 ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
@@ -2005,7 +2024,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                goto nla_put_failure;
 
 #ifdef CONFIG_NF_NAT_NEEDED
-       if (exp->saved_ip || exp->saved_proto.all) {
+       if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
+           exp->saved_proto.all) {
                nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
                if (!nest_parms)
                        goto nla_put_failure;
@@ -2014,7 +2034,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                        goto nla_put_failure;
 
                nat_tuple.src.l3num = nf_ct_l3num(master);
-               nat_tuple.src.u3.ip = exp->saved_ip;
+               nat_tuple.src.u3 = exp->saved_addr;
                nat_tuple.dst.protonum = nf_ct_protonum(master);
                nat_tuple.src.u = exp->saved_proto;
 
@@ -2050,15 +2070,15 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                        int event, const struct nf_conntrack_expect *exp)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
 
        event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -2109,7 +2129,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
                goto errout;
 
        type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
-       nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -2124,7 +2144,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
        rcu_read_unlock();
 
        nlmsg_end(skb, nlh);
-       nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
+       nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
        return 0;
 
 nla_put_failure:
@@ -2167,7 +2187,7 @@ restart:
                                cb->args[1] = 0;
                        }
                        if (ctnetlink_exp_fill_info(skb,
-                                                   NETLINK_CB(cb->skb).pid,
+                                                   NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    IPCTNL_MSG_EXP_NEW,
                                                    exp) < 0) {
@@ -2260,14 +2280,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        }
 
        rcu_read_lock();
-       err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
+       err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
                                      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
        rcu_read_unlock();
        nf_ct_expect_put(exp);
        if (err <= 0)
                goto free;
 
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (err < 0)
                goto out;
 
@@ -2321,7 +2341,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                /* after list removal, usage count == 1 */
                spin_lock_bh(&nf_conntrack_lock);
                if (del_timer(&exp->timeout)) {
-                       nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid,
+                       nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
                                                   nlmsg_report(nlh));
                        nf_ct_expect_put(exp);
                }
@@ -2343,7 +2363,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                                if (!strcmp(m_help->helper->name, name) &&
                                    del_timer(&exp->timeout)) {
                                        nf_ct_unlink_expect_report(exp,
-                                                       NETLINK_CB(skb).pid,
+                                                       NETLINK_CB(skb).portid,
                                                        nlmsg_report(nlh));
                                        nf_ct_expect_put(exp);
                                }
@@ -2359,7 +2379,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                                                  hnode) {
                                if (del_timer(&exp->timeout)) {
                                        nf_ct_unlink_expect_report(exp,
-                                                       NETLINK_CB(skb).pid,
+                                                       NETLINK_CB(skb).portid,
                                                        nlmsg_report(nlh));
                                        nf_ct_expect_put(exp);
                                }
@@ -2410,7 +2430,7 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
        if (err < 0)
                return err;
 
-       exp->saved_ip = nat_tuple.src.u3.ip;
+       exp->saved_addr = nat_tuple.src.u3;
        exp->saved_proto = nat_tuple.src.u;
        exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
 
@@ -2424,7 +2444,7 @@ static int
 ctnetlink_create_expect(struct net *net, u16 zone,
                        const struct nlattr * const cda[],
                        u_int8_t u3,
-                       u32 pid, int report)
+                       u32 portid, int report)
 {
        struct nf_conntrack_tuple tuple, mask, master_tuple;
        struct nf_conntrack_tuple_hash *h = NULL;
@@ -2537,7 +2557,7 @@ ctnetlink_create_expect(struct net *net, u16 zone,
                if (err < 0)
                        goto err_out;
        }
-       err = nf_ct_expect_related_report(exp, pid, report);
+       err = nf_ct_expect_related_report(exp, portid, report);
 err_out:
        nf_ct_expect_put(exp);
 out:
@@ -2580,7 +2600,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
                        err = ctnetlink_create_expect(net, zone, cda,
                                                      u3,
-                                                     NETLINK_CB(skb).pid,
+                                                     NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                }
                return err;
@@ -2595,15 +2615,15 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
 }
 
 static int
-ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
+ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
                             const struct ip_conntrack_stat *st)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -2642,7 +2662,7 @@ ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
 
                st = per_cpu_ptr(net->ct.stat, cpu);
-               if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
+               if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                                 cb->nlh->nlmsg_seq,
                                                 cpu, st) < 0)
                        break;
index 6fed9ec35248ba2c000264164c7860851655a904..cc7669ef0b95d1d7a0dd6f2a03d5e59d9687689d 100644 (file)
@@ -45,14 +45,14 @@ static DEFINE_SPINLOCK(nf_pptp_lock);
 int
 (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
                             struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                            struct PptpControlHeader *ctlh,
+                            unsigned int protoff, struct PptpControlHeader *ctlh,
                             union pptp_ctrl_union *pptpReq) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound);
 
 int
 (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
                            struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                           struct PptpControlHeader *ctlh,
+                           unsigned int protoff, struct PptpControlHeader *ctlh,
                            union pptp_ctrl_union *pptpReq) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound);
 
@@ -262,7 +262,7 @@ out_unexpect_orig:
 }
 
 static inline int
-pptp_inbound_pkt(struct sk_buff *skb,
+pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                 struct PptpControlHeader *ctlh,
                 union pptp_ctrl_union *pptpReq,
                 unsigned int reqlen,
@@ -376,7 +376,8 @@ pptp_inbound_pkt(struct sk_buff *skb,
 
        nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound);
        if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK)
-               return nf_nat_pptp_inbound(skb, ct, ctinfo, ctlh, pptpReq);
+               return nf_nat_pptp_inbound(skb, ct, ctinfo,
+                                          protoff, ctlh, pptpReq);
        return NF_ACCEPT;
 
 invalid:
@@ -389,7 +390,7 @@ invalid:
 }
 
 static inline int
-pptp_outbound_pkt(struct sk_buff *skb,
+pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
                  struct PptpControlHeader *ctlh,
                  union pptp_ctrl_union *pptpReq,
                  unsigned int reqlen,
@@ -471,7 +472,8 @@ pptp_outbound_pkt(struct sk_buff *skb,
 
        nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound);
        if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK)
-               return nf_nat_pptp_outbound(skb, ct, ctinfo, ctlh, pptpReq);
+               return nf_nat_pptp_outbound(skb, ct, ctinfo,
+                                           protoff, ctlh, pptpReq);
        return NF_ACCEPT;
 
 invalid:
@@ -570,11 +572,11 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
         * established from PNS->PAC.  However, RFC makes no guarantee */
        if (dir == IP_CT_DIR_ORIGINAL)
                /* client -> server (PNS -> PAC) */
-               ret = pptp_outbound_pkt(skb, ctlh, pptpReq, reqlen, ct,
+               ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
                                        ctinfo);
        else
                /* server -> client (PAC -> PNS) */
-               ret = pptp_inbound_pkt(skb, ctlh, pptpReq, reqlen, ct,
+               ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
                                       ctinfo);
        pr_debug("sstate: %d->%d, cstate: %d->%d\n",
                 oldsstate, info->sstate, oldcstate, info->cstate);
index 0dc63854390f70f738b85df4c41bbbd2a9f314da..51e928db48c846f469da93ed70ed072807f3359f 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/notifier.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
@@ -294,9 +293,7 @@ void nf_conntrack_l3proto_unregister(struct net *net,
        nf_ct_l3proto_unregister_sysctl(net, proto);
 
        /* Remove all contrack entries for this protocol */
-       rtnl_lock();
        nf_ct_iterate_cleanup(net, kill_l3proto, proto);
-       rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
 
@@ -502,9 +499,7 @@ void nf_conntrack_l4proto_unregister(struct net *net,
        nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 
        /* Remove all contrack entries for this protocol */
-       rtnl_lock();
        nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
-       rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
 
index e046b3756aab755080d3edced132c459b7c8d4c4..61f9285111d19ae5b34c59b5d8f98366d0645086 100644 (file)
@@ -502,10 +502,10 @@ static inline s16 nat_offset(const struct nf_conn *ct,
 
        return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
 }
-#define NAT_OFFSET(pf, ct, dir, seq) \
-       (pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
+#define NAT_OFFSET(ct, dir, seq) \
+       (nat_offset(ct, dir, seq))
 #else
-#define NAT_OFFSET(pf, ct, dir, seq)   0
+#define NAT_OFFSET(ct, dir, seq)       0
 #endif
 
 static bool tcp_in_window(const struct nf_conn *ct,
@@ -538,7 +538,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                tcp_sack(skb, dataoff, tcph, &sack);
 
        /* Take into account NAT sequence number mangling */
-       receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
+       receiver_offset = NAT_OFFSET(ct, !dir, ack - 1);
        ack -= receiver_offset;
        sack -= receiver_offset;
 
index 5c0a112aeee6adc580f0303b50e6821d12e8839e..df8f4f284481042800b3da96ab41bf3589ef512e 100644 (file)
@@ -52,15 +52,17 @@ module_param(sip_direct_media, int, 0600);
 MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
                                   "endpoints only (default 1)");
 
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
-                               const char **dptr,
+unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
+                               unsigned int dataoff, const char **dptr,
                                unsigned int *datalen) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
 
-void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
+void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
+                                  s16 off) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
 
 unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
+                                      unsigned int protoff,
                                       unsigned int dataoff,
                                       const char **dptr,
                                       unsigned int *datalen,
@@ -69,7 +71,8 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
                                       unsigned int matchlen) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
 
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
+unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr,
                                     unsigned int *datalen,
                                     unsigned int sdpoff,
@@ -79,7 +82,8 @@ unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
                                     __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
 
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
+unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr,
                                     unsigned int *datalen,
                                     unsigned int matchoff,
@@ -88,6 +92,7 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
 EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
 
 unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
+                                       unsigned int protoff,
                                        unsigned int dataoff,
                                        const char **dptr,
                                        unsigned int *datalen,
@@ -96,7 +101,8 @@ unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
                                        __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
 
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
+unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
+                                     unsigned int dataoff,
                                      const char **dptr,
                                      unsigned int *datalen,
                                      struct nf_conntrack_expect *rtp_exp,
@@ -737,13 +743,18 @@ static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
  * be tolerant and also accept records terminated with a single newline
  * character". We handle both cases.
  */
-static const struct sip_header ct_sdp_hdrs[] = {
-       [SDP_HDR_VERSION]               = SDP_HDR("v=", NULL, digits_len),
-       [SDP_HDR_OWNER_IP4]             = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
-       [SDP_HDR_CONNECTION_IP4]        = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
-       [SDP_HDR_OWNER_IP6]             = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
-       [SDP_HDR_CONNECTION_IP6]        = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
-       [SDP_HDR_MEDIA]                 = SDP_HDR("m=", NULL, media_len),
+static const struct sip_header ct_sdp_hdrs_v4[] = {
+       [SDP_HDR_VERSION]       = SDP_HDR("v=", NULL, digits_len),
+       [SDP_HDR_OWNER]         = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
+       [SDP_HDR_CONNECTION]    = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
+       [SDP_HDR_MEDIA]         = SDP_HDR("m=", NULL, media_len),
+};
+
+static const struct sip_header ct_sdp_hdrs_v6[] = {
+       [SDP_HDR_VERSION]       = SDP_HDR("v=", NULL, digits_len),
+       [SDP_HDR_OWNER]         = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
+       [SDP_HDR_CONNECTION]    = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
+       [SDP_HDR_MEDIA]         = SDP_HDR("m=", NULL, media_len),
 };
 
 /* Linear string search within SDP header values */
@@ -769,11 +780,14 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
                          enum sdp_header_types term,
                          unsigned int *matchoff, unsigned int *matchlen)
 {
-       const struct sip_header *hdr = &ct_sdp_hdrs[type];
-       const struct sip_header *thdr = &ct_sdp_hdrs[term];
+       const struct sip_header *hdrs, *hdr, *thdr;
        const char *start = dptr, *limit = dptr + datalen;
        int shift = 0;
 
+       hdrs = nf_ct_l3num(ct) == NFPROTO_IPV4 ? ct_sdp_hdrs_v4 : ct_sdp_hdrs_v6;
+       hdr = &hdrs[type];
+       thdr = &hdrs[term];
+
        for (dptr += dataoff; dptr < limit; dptr++) {
                /* Find beginning of line */
                if (*dptr != '\r' && *dptr != '\n')
@@ -883,7 +897,8 @@ static void flush_expectations(struct nf_conn *ct, bool media)
        spin_unlock_bh(&nf_conntrack_lock);
 }
 
-static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
+static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
+                                unsigned int dataoff,
                                 const char **dptr, unsigned int *datalen,
                                 union nf_inet_addr *daddr, __be16 port,
                                 enum sip_expectation_classes class,
@@ -939,12 +954,12 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
                    exp->class != class)
                        break;
 #ifdef CONFIG_NF_NAT_NEEDED
-               if (exp->tuple.src.l3num == AF_INET && !direct_rtp &&
-                   (exp->saved_ip != exp->tuple.dst.u3.ip ||
+               if (!direct_rtp &&
+                   (!nf_inet_addr_cmp(&exp->saved_addr, &exp->tuple.dst.u3) ||
                     exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) &&
                    ct->status & IPS_NAT_MASK) {
-                       daddr->ip               = exp->saved_ip;
-                       tuple.dst.u3.ip         = exp->saved_ip;
+                       *daddr                  = exp->saved_addr;
+                       tuple.dst.u3            = exp->saved_addr;
                        tuple.dst.u.udp.port    = exp->saved_proto.udp.port;
                        direct_rtp = 1;
                } else
@@ -960,7 +975,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
        if (direct_rtp) {
                nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
                if (nf_nat_sdp_port &&
-                   !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
+                   !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
                                     mediaoff, medialen, ntohs(rtp_port)))
                        goto err1;
        }
@@ -982,7 +997,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
 
        nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
        if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
-               ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
+               ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
                                       rtp_exp, rtcp_exp,
                                       mediaoff, medialen, daddr);
        else {
@@ -1023,7 +1038,8 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
        return NULL;
 }
 
-static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
+static int process_sdp(struct sk_buff *skb, unsigned int protoff,
+                      unsigned int dataoff,
                       const char **dptr, unsigned int *datalen,
                       unsigned int cseq)
 {
@@ -1036,15 +1052,12 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
        unsigned int i;
        union nf_inet_addr caddr, maddr, rtp_addr;
        unsigned int port;
-       enum sdp_header_types c_hdr;
        const struct sdp_media_type *t;
        int ret = NF_ACCEPT;
        typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
        typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
 
        nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
-       c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 :
-                                            SDP_HDR_CONNECTION_IP6;
 
        /* Find beginning of session description */
        if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1058,7 +1071,7 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
         * the end of the session description. */
        caddr_len = 0;
        if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen,
-                                 c_hdr, SDP_HDR_MEDIA,
+                                 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                  &matchoff, &matchlen, &caddr) > 0)
                caddr_len = matchlen;
 
@@ -1088,7 +1101,7 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
                /* The media description overrides the session description. */
                maddr_len = 0;
                if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen,
-                                         c_hdr, SDP_HDR_MEDIA,
+                                         SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                          &matchoff, &matchlen, &maddr) > 0) {
                        maddr_len = matchlen;
                        memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
@@ -1097,7 +1110,8 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
                else
                        return NF_DROP;
 
-               ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
+               ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
+                                           dptr, datalen,
                                            &rtp_addr, htons(port), t->class,
                                            mediaoff, medialen);
                if (ret != NF_ACCEPT)
@@ -1105,8 +1119,9 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
 
                /* Update media connection address if present */
                if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
-                       ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
-                                             mediaoff, c_hdr, SDP_HDR_MEDIA,
+                       ret = nf_nat_sdp_addr(skb, protoff, dataoff,
+                                             dptr, datalen, mediaoff,
+                                             SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                              &rtp_addr);
                        if (ret != NF_ACCEPT)
                                return ret;
@@ -1117,12 +1132,13 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
        /* Update session connection and owner addresses */
        nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
        if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
-                                        &rtp_addr);
+               ret = nf_nat_sdp_session(skb, protoff, dataoff,
+                                        dptr, datalen, sdpoff, &rtp_addr);
 
        return ret;
 }
-static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_invite_response(struct sk_buff *skb, unsigned int protoff,
+                                  unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq, unsigned int code)
 {
@@ -1132,13 +1148,14 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
 
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
-               return process_sdp(skb, dataoff, dptr, datalen, cseq);
+               return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        else if (ct_sip_info->invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
 }
 
-static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_update_response(struct sk_buff *skb, unsigned int protoff,
+                                  unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq, unsigned int code)
 {
@@ -1148,13 +1165,14 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
 
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
-               return process_sdp(skb, dataoff, dptr, datalen, cseq);
+               return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        else if (ct_sip_info->invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
 }
 
-static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_prack_response(struct sk_buff *skb, unsigned int protoff,
+                                 unsigned int dataoff,
                                  const char **dptr, unsigned int *datalen,
                                  unsigned int cseq, unsigned int code)
 {
@@ -1164,13 +1182,14 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
 
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
-               return process_sdp(skb, dataoff, dptr, datalen, cseq);
+               return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        else if (ct_sip_info->invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
 }
 
-static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_invite_request(struct sk_buff *skb, unsigned int protoff,
+                                 unsigned int dataoff,
                                  const char **dptr, unsigned int *datalen,
                                  unsigned int cseq)
 {
@@ -1180,13 +1199,14 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
        unsigned int ret;
 
        flush_expectations(ct, true);
-       ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
+       ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        if (ret == NF_ACCEPT)
                ct_sip_info->invite_cseq = cseq;
        return ret;
 }
 
-static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_bye_request(struct sk_buff *skb, unsigned int protoff,
+                              unsigned int dataoff,
                               const char **dptr, unsigned int *datalen,
                               unsigned int cseq)
 {
@@ -1201,7 +1221,8 @@ static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
  * signalling connections. The expectation is marked inactive and is activated
  * when receiving a response indicating success from the registrar.
  */
-static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_register_request(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int cseq)
 {
@@ -1276,8 +1297,8 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
 
        nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
        if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
-                                       matchoff, matchlen);
+               ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
+                                       exp, matchoff, matchlen);
        else {
                if (nf_ct_expect_related(exp) != 0)
                        ret = NF_DROP;
@@ -1292,7 +1313,8 @@ store_cseq:
        return ret;
 }
 
-static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_register_response(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr, unsigned int *datalen,
                                     unsigned int cseq, unsigned int code)
 {
@@ -1374,7 +1396,8 @@ static const struct sip_handler sip_handlers[] = {
        SIP_HANDLER("REGISTER", process_register_request, process_register_response),
 };
 
-static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
+                               unsigned int dataoff,
                                const char **dptr, unsigned int *datalen)
 {
        enum ip_conntrack_info ctinfo;
@@ -1405,13 +1428,14 @@ static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
                if (*datalen < matchend + handler->len ||
                    strnicmp(*dptr + matchend, handler->method, handler->len))
                        continue;
-               return handler->response(skb, dataoff, dptr, datalen,
+               return handler->response(skb, protoff, dataoff, dptr, datalen,
                                         cseq, code);
        }
        return NF_ACCEPT;
 }
 
-static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
+                              unsigned int dataoff,
                               const char **dptr, unsigned int *datalen)
 {
        enum ip_conntrack_info ctinfo;
@@ -1436,26 +1460,28 @@ static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
                if (!cseq)
                        return NF_DROP;
 
-               return handler->request(skb, dataoff, dptr, datalen, cseq);
+               return handler->request(skb, protoff, dataoff, dptr, datalen,
+                                       cseq);
        }
        return NF_ACCEPT;
 }
 
 static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
-                          unsigned int dataoff, const char **dptr,
-                          unsigned int *datalen)
+                          unsigned int protoff, unsigned int dataoff,
+                          const char **dptr, unsigned int *datalen)
 {
        typeof(nf_nat_sip_hook) nf_nat_sip;
        int ret;
 
        if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
-               ret = process_sip_request(skb, dataoff, dptr, datalen);
+               ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
        else
-               ret = process_sip_response(skb, dataoff, dptr, datalen);
+               ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
 
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
                nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
-               if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
+               if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
+                                             dptr, datalen))
                        ret = NF_DROP;
        }
 
@@ -1523,7 +1549,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
                if (msglen > datalen)
                        return NF_DROP;
 
-               ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+               ret = process_sip_msg(skb, ct, protoff, dataoff,
+                                     &dptr, &msglen);
                if (ret != NF_ACCEPT)
                        break;
                diff     = msglen - origlen;
@@ -1537,7 +1564,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
                nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
                if (nf_nat_sip_seq_adjust)
-                       nf_nat_sip_seq_adjust(skb, tdiff);
+                       nf_nat_sip_seq_adjust(skb, protoff, tdiff);
        }
 
        return ret;
@@ -1564,7 +1591,7 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
        if (datalen < strlen("SIP/2.0 200"))
                return NF_ACCEPT;
 
-       return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
+       return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen);
 }
 
 static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
index 770f76432ad02b89904646d80ed7c03571bccc16..3deec997be89e32770750abc504922761d096935 100644 (file)
@@ -18,13 +18,13 @@ extern unsigned int nf_iterate(struct list_head *head,
                                unsigned int hook,
                                const struct net_device *indev,
                                const struct net_device *outdev,
-                               struct list_head **i,
+                               struct nf_hook_ops **elemp,
                                int (*okfn)(struct sk_buff *),
                                int hook_thresh);
 
 /* nf_queue.c */
 extern int nf_queue(struct sk_buff *skb,
-                   struct list_head *elem,
+                   struct nf_hook_ops *elem,
                    u_int8_t pf, unsigned int hook,
                    struct net_device *indev,
                    struct net_device *outdev,
similarity index 96%
rename from net/ipv4/netfilter/nf_nat_amanda.c
rename to net/netfilter/nf_nat_amanda.c
index 3c04d24e2976c69972fd582e1960585b75b36582..42d337881171aa3ecf6933a6fdcae57620b51d5a 100644 (file)
@@ -16,7 +16,6 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <linux/netfilter/nf_conntrack_amanda.h>
 
 MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
@@ -26,6 +25,7 @@ MODULE_ALIAS("ip_nat_amanda");
 
 static unsigned int help(struct sk_buff *skb,
                         enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
                         unsigned int matchoff,
                         unsigned int matchlen,
                         struct nf_conntrack_expect *exp)
@@ -61,7 +61,7 @@ static unsigned int help(struct sk_buff *skb,
 
        sprintf(buffer, "%u", port);
        ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
-                                      matchoff, matchlen,
+                                      protoff, matchoff, matchlen,
                                       buffer, strlen(buffer));
        if (ret != NF_ACCEPT)
                nf_ct_unexpect_related(exp);
similarity index 51%
rename from net/ipv4/netfilter/nf_nat_core.c
rename to net/netfilter/nf_nat_core.c
index 44b082fd48abd214b4365a964b44076c164c7148..5f2f9109f4615e3dc1fa510c5136d10bfa20fba8 100644 (file)
@@ -1,7 +1,7 @@
-/* NAT for netfilter; shared with compatibility layer. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/timer.h>
 #include <linux/skbuff.h>
 #include <linux/gfp.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/tcp.h>  /* For tcp_prot in getorigdst */
-#include <linux/icmp.h>
-#include <linux/udp.h>
+#include <net/xfrm.h>
 #include <linux/jhash.h>
+#include <linux/rtnetlink.h>
 
-#include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_helper.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <linux/netfilter/nf_nat.h>
 
 static DEFINE_SPINLOCK(nf_nat_lock);
 
-static struct nf_conntrack_l3proto *l3proto __read_mostly;
-
-#define MAX_IP_NAT_PROTO 256
-static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
+static DEFINE_MUTEX(nf_nat_proto_mutex);
+static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
+                                               __read_mostly;
+static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
 
-static inline const struct nf_nat_protocol *
-__nf_nat_proto_find(u_int8_t protonum)
+
+inline const struct nf_nat_l3proto *
+__nf_nat_l3proto_find(u8 family)
+{
+       return rcu_dereference(nf_nat_l3protos[family]);
+}
+
+inline const struct nf_nat_l4proto *
+__nf_nat_l4proto_find(u8 family, u8 protonum)
+{
+       return rcu_dereference(nf_nat_l4protos[family][protonum]);
+}
+EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
+
+#ifdef CONFIG_XFRM
+static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
+{
+       const struct nf_nat_l3proto *l3proto;
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       enum ip_conntrack_dir dir;
+       unsigned  long statusbit;
+       u8 family;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (ct == NULL)
+               return;
+
+       family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+       rcu_read_lock();
+       l3proto = __nf_nat_l3proto_find(family);
+       if (l3proto == NULL)
+               goto out;
+
+       dir = CTINFO2DIR(ctinfo);
+       if (dir == IP_CT_DIR_ORIGINAL)
+               statusbit = IPS_DST_NAT;
+       else
+               statusbit = IPS_SRC_NAT;
+
+       l3proto->decode_session(skb, ct, dir, statusbit, fl);
+out:
+       rcu_read_unlock();
+}
+
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
 {
-       return rcu_dereference(nf_nat_protos[protonum]);
+       struct flowi fl;
+       unsigned int hh_len;
+       struct dst_entry *dst;
+
+       if (xfrm_decode_session(skb, &fl, family) < 0)
+               return -1;
+
+       dst = skb_dst(skb);
+       if (dst->xfrm)
+               dst = ((struct xfrm_dst *)dst)->route;
+       dst_hold(dst);
+
+       dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
+       if (IS_ERR(dst))
+               return -1;
+
+       skb_dst_drop(skb);
+       skb_dst_set(skb, dst);
+
+       /* Change in oif may mean change in hh_len. */
+       hh_len = skb_dst(skb)->dev->hard_header_len;
+       if (skb_headroom(skb) < hh_len &&
+           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+               return -1;
+       return 0;
 }
+EXPORT_SYMBOL(nf_xfrm_me_harder);
+#endif /* CONFIG_XFRM */
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
@@ -54,10 +121,9 @@ hash_by_src(const struct net *net, u16 zone,
        unsigned int hash;
 
        /* Original src, to ensure we map it consistently if poss. */
-       hash = jhash_3words((__force u32)tuple->src.u3.ip,
-                           (__force u32)tuple->src.u.all ^ zone,
-                           tuple->dst.protonum, nf_conntrack_hash_rnd);
-       return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
+       hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
+                     tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+       return ((u64)hash * net->ct.nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -66,10 +132,11 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
 {
        /* Conntrack tracking doesn't keep track of outgoing tuples; only
-          incoming ones.  NAT means they don't have a fixed mapping,
-          so we invert the tuple and look for the incoming reply.
-
-          We could keep a separate hash if this proves too slow. */
+        * incoming ones.  NAT means they don't have a fixed mapping,
+        * so we invert the tuple and look for the incoming reply.
+        *
+        * We could keep a separate hash if this proves too slow.
+        */
        struct nf_conntrack_tuple reply;
 
        nf_ct_invert_tuplepr(&reply, tuple);
@@ -78,31 +145,26 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
 EXPORT_SYMBOL(nf_nat_used_tuple);
 
 /* If we source map this tuple so reply looks like reply_tuple, will
- * that meet the constraints of range. */
-static int
-in_range(const struct nf_conntrack_tuple *tuple,
-        const struct nf_nat_ipv4_range *range)
+ * that meet the constraints of range.
+ */
+static int in_range(const struct nf_nat_l3proto *l3proto,
+                   const struct nf_nat_l4proto *l4proto,
+                   const struct nf_conntrack_tuple *tuple,
+                   const struct nf_nat_range *range)
 {
-       const struct nf_nat_protocol *proto;
-       int ret = 0;
-
        /* If we are supposed to map IPs, then we must be in the
-          range specified, otherwise let this drag us onto a new src IP. */
-       if (range->flags & NF_NAT_RANGE_MAP_IPS) {
-               if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
-                   ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
-                       return 0;
-       }
+        * range specified, otherwise let this drag us onto a new src IP.
+        */
+       if (range->flags & NF_NAT_RANGE_MAP_IPS &&
+           !l3proto->in_range(tuple, range))
+               return 0;
 
-       rcu_read_lock();
-       proto = __nf_nat_proto_find(tuple->dst.protonum);
        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
-           proto->in_range(tuple, NF_NAT_MANIP_SRC,
-                           &range->min, &range->max))
-               ret = 1;
-       rcu_read_unlock();
+           l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
+                             &range->min_proto, &range->max_proto))
+               return 1;
 
-       return ret;
+       return 0;
 }
 
 static inline int
@@ -113,24 +175,25 @@ same_src(const struct nf_conn *ct,
 
        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
-               t->src.u3.ip == tuple->src.u3.ip &&
+               nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
 }
 
 /* Only called for SRC manip */
 static int
 find_appropriate_src(struct net *net, u16 zone,
+                    const struct nf_nat_l3proto *l3proto,
+                    const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
-                    const struct nf_nat_ipv4_range *range)
+                    const struct nf_nat_range *range)
 {
        unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
        const struct hlist_node *n;
 
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
+       hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
                        /* Copy source part from reply tuple. */
@@ -138,119 +201,148 @@ find_appropriate_src(struct net *net, u16 zone,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;
 
-                       if (in_range(result, range)) {
-                               rcu_read_unlock();
+                       if (in_range(l3proto, l4proto, result, range))
                                return 1;
-                       }
                }
        }
-       rcu_read_unlock();
        return 0;
 }
 
 /* For [FUTURE] fragmentation handling, we want the least-used
  src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
  if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
  1-65535, we don't do pro-rata allocation based on ports; we choose
  the ip with the lowest src-ip/dst-ip/proto usage.
-*/
* src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
* if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
* 1-65535, we don't do pro-rata allocation based on ports; we choose
* the ip with the lowest src-ip/dst-ip/proto usage.
+ */
 static void
 find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
-                   const struct nf_nat_ipv4_range *range,
+                   const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
 {
-       __be32 *var_ipp;
+       union nf_inet_addr *var_ipp;
+       unsigned int i, max;
        /* Host order */
-       u_int32_t minip, maxip, j;
+       u32 minip, maxip, j, dist;
+       bool full_range;
 
        /* No IP mapping?  Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;
 
        if (maniptype == NF_NAT_MANIP_SRC)
-               var_ipp = &tuple->src.u3.ip;
+               var_ipp = &tuple->src.u3;
        else
-               var_ipp = &tuple->dst.u3.ip;
+               var_ipp = &tuple->dst.u3;
 
        /* Fast path: only one choice. */
-       if (range->min_ip == range->max_ip) {
-               *var_ipp = range->min_ip;
+       if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
+               *var_ipp = range->min_addr;
                return;
        }
 
+       if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+               max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
+       else
+               max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;
+
        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
-        * like this), even across reboots. */
-       minip = ntohl(range->min_ip);
-       maxip = ntohl(range->max_ip);
-       j = jhash_2words((__force u32)tuple->src.u3.ip,
-                        range->flags & NF_NAT_RANGE_PERSISTENT ?
-                               0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
-       j = ((u64)j * (maxip - minip + 1)) >> 32;
-       *var_ipp = htonl(minip + j);
+        * like this), even across reboots.
+        */
+       j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
+                  range->flags & NF_NAT_RANGE_PERSISTENT ?
+                       0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+
+       full_range = false;
+       for (i = 0; i <= max; i++) {
+               /* If first bytes of the address are at the maximum, use the
+                * distance. Otherwise use the full range.
+                */
+               if (!full_range) {
+                       minip = ntohl((__force __be32)range->min_addr.all[i]);
+                       maxip = ntohl((__force __be32)range->max_addr.all[i]);
+                       dist  = maxip - minip + 1;
+               } else {
+                       minip = 0;
+                       dist  = ~0;
+               }
+
+               var_ipp->all[i] = (__force __u32)
+                       htonl(minip + (((u64)j * dist) >> 32));
+               if (var_ipp->all[i] != range->max_addr.all[i])
+                       full_range = true;
+
+               if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
+                       j ^= (__force u32)tuple->dst.u3.all[i];
+       }
 }
 
-/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
- * we change the source to map into the range.  For NF_INET_PRE_ROUTING
+/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
+ * we change the source to map into the range. For NF_INET_PRE_ROUTING
  * and NF_INET_LOCAL_OUT, we change the destination to map into the
- * range.  It might not be possible to get a unique tuple, but we try.
+ * range. It might not be possible to get a unique tuple, but we try.
  * At worst (or if we race), we will end up with a final duplicate in
  * __ip_conntrack_confirm and drop the packet. */
 static void
 get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
-                const struct nf_nat_ipv4_range *range,
+                const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
 {
+       const struct nf_nat_l3proto *l3proto;
+       const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
-       const struct nf_nat_protocol *proto;
        u16 zone = nf_ct_zone(ct);
 
-       /* 1) If this srcip/proto/src-proto-part is currently mapped,
-          and that same mapping gives a unique tuple within the given
-          range, use that.
+       rcu_read_lock();
+       l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
+       l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
+                                       orig_tuple->dst.protonum);
 
-          This is only required for source (ie. NAT/masq) mappings.
-          So far, we don't do local source mappings, so multiple
-          manips not an issue.  */
+       /* 1) If this srcip/proto/src-proto-part is currently mapped,
+        * and that same mapping gives a unique tuple within the given
+        * range, use that.
+        *
+        * This is only required for source (ie. NAT/masq) mappings.
+        * So far, we don't do local source mappings, so multiple
+        * manips not an issue.
+        */
        if (maniptype == NF_NAT_MANIP_SRC &&
            !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
                /* try the original tuple first */
-               if (in_range(orig_tuple, range)) {
+               if (in_range(l3proto, l4proto, orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
                                *tuple = *orig_tuple;
-                               return;
+                               goto out;
                        }
-               } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
-                          range)) {
+               } else if (find_appropriate_src(net, zone, l3proto, l4proto,
+                                               orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
-                               return;
+                               goto out;
                }
        }
 
-       /* 2) Select the least-used IP/proto combination in the given
-          range. */
+       /* 2) Select the least-used IP/proto combination in the given range */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);
 
        /* 3) The per-protocol part of the manip is made to map into
-          the range to make a unique tuple. */
-
-       rcu_read_lock();
-       proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
+        * the range to make a unique tuple.
+        */
 
        /* Only bother mapping if it's not already in range and unique */
        if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
-                       if (proto->in_range(tuple, maniptype, &range->min,
-                                           &range->max) &&
-                           (range->min.all == range->max.all ||
+                       if (l4proto->in_range(tuple, maniptype,
+                                             &range->min_proto,
+                                             &range->max_proto) &&
+                           (range->min_proto.all == range->max_proto.all ||
                             !nf_nat_used_tuple(tuple, ct)))
                                goto out;
                } else if (!nf_nat_used_tuple(tuple, ct)) {
@@ -259,14 +351,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
        }
 
        /* Last change: get protocol to try to obtain unique tuple. */
-       proto->unique_tuple(tuple, range, maniptype, ct);
+       l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
 out:
        rcu_read_unlock();
 }
 
 unsigned int
 nf_nat_setup_info(struct nf_conn *ct,
-                 const struct nf_nat_ipv4_range *range,
+                 const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
 {
        struct net *net = nf_ct_net(ct);
@@ -288,10 +380,10 @@ nf_nat_setup_info(struct nf_conn *ct,
        BUG_ON(nf_nat_initialized(ct, maniptype));
 
        /* What we've got will look like inverse of reply. Normally
-          this is what is in the conntrack, except for prior
-          manipulations (future optimization: if num_manips == 0,
-          orig_tp =
-          conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
+        * this is what is in the conntrack, except for prior
+        * manipulations (future optimization: if num_manips == 0,
+        * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
+        */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
@@ -317,11 +409,11 @@ nf_nat_setup_info(struct nf_conn *ct,
                srchash = hash_by_src(net, nf_ct_zone(ct),
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
-               /* nf_conntrack_alter_reply might re-allocate extension area */
+               /* nf_conntrack_alter_reply might re-allocate extension aera */
                nat = nfct_nat(ct);
                nat->ct = ct;
                hlist_add_head_rcu(&nat->bysource,
-                                  &net->ipv4.nat_bysource[srchash]);
+                                  &net->ct.nat_bysource[srchash]);
                spin_unlock_bh(&nf_nat_lock);
        }
 
@@ -335,47 +427,14 @@ nf_nat_setup_info(struct nf_conn *ct,
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
 
-/* Returns true if succeeded. */
-static bool
-manip_pkt(u_int16_t proto,
-         struct sk_buff *skb,
-         unsigned int iphdroff,
-         const struct nf_conntrack_tuple *target,
-         enum nf_nat_manip_type maniptype)
-{
-       struct iphdr *iph;
-       const struct nf_nat_protocol *p;
-
-       if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
-               return false;
-
-       iph = (void *)skb->data + iphdroff;
-
-       /* Manipulate protcol part. */
-
-       /* rcu_read_lock()ed by nf_hook_slow */
-       p = __nf_nat_proto_find(proto);
-       if (!p->manip_pkt(skb, iphdroff, target, maniptype))
-               return false;
-
-       iph = (void *)skb->data + iphdroff;
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
-               iph->saddr = target->src.u3.ip;
-       } else {
-               csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
-               iph->daddr = target->dst.u3.ip;
-       }
-       return true;
-}
-
 /* Do packet manipulations according to nf_nat_setup_info. */
 unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
 {
+       const struct nf_nat_l3proto *l3proto;
+       const struct nf_nat_l4proto *l4proto;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
@@ -396,129 +455,176 @@ unsigned int nf_nat_packet(struct nf_conn *ct,
                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
 
-               if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
+               l3proto = __nf_nat_l3proto_find(target.src.l3num);
+               l4proto = __nf_nat_l4proto_find(target.src.l3num,
+                                               target.dst.protonum);
+               if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
 }
 EXPORT_SYMBOL_GPL(nf_nat_packet);
 
-/* Dir is direction ICMP is coming from (opposite to packet it contains) */
-int nf_nat_icmp_reply_translation(struct nf_conn *ct,
-                                 enum ip_conntrack_info ctinfo,
-                                 unsigned int hooknum,
-                                 struct sk_buff *skb)
+struct nf_nat_proto_clean {
+       u8      l3proto;
+       u8      l4proto;
+       bool    hash;
+};
+
+/* Clear NAT section of all conntracks, in case we're loaded again. */
+static int nf_nat_proto_clean(struct nf_conn *i, void *data)
 {
-       struct {
-               struct icmphdr icmp;
-               struct iphdr ip;
-       } *inside;
-       struct nf_conntrack_tuple target;
-       int hdrlen = ip_hdrlen(skb);
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned long statusbit;
-       enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+       const struct nf_nat_proto_clean *clean = data;
+       struct nf_conn_nat *nat = nfct_nat(i);
 
-       if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+       if (!nat)
                return 0;
-
-       inside = (void *)skb->data + hdrlen;
-
-       /* We're actually going to mangle it beyond trivial checksum
-          adjustment, so make sure the current checksum is correct. */
-       if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+       if (!(i->status & IPS_SRC_NAT_DONE))
+               return 0;
+       if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
+           (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;
 
-       /* Must be RELATED */
-       NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
-                    skb->nfctinfo == IP_CT_RELATED_REPLY);
-
-       /* Redirects on non-null nats must be dropped, else they'll
-          start talking to each other without our translation, and be
-          confused... --RR */
-       if (inside->icmp.type == ICMP_REDIRECT) {
-               /* If NAT isn't finished, assume it and drop. */
-               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
-                       return 0;
-
-               if (ct->status & IPS_NAT_MASK)
-                       return 0;
+       if (clean->hash) {
+               spin_lock_bh(&nf_nat_lock);
+               hlist_del_rcu(&nat->bysource);
+               spin_unlock_bh(&nf_nat_lock);
+       } else {
+               memset(nat, 0, sizeof(*nat));
+               i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
+                              IPS_SEQ_ADJUST);
        }
+       return 0;
+}
 
-       if (manip == NF_NAT_MANIP_SRC)
-               statusbit = IPS_SRC_NAT;
-       else
-               statusbit = IPS_DST_NAT;
-
-       /* Invert if this is reply dir. */
-       if (dir == IP_CT_DIR_REPLY)
-               statusbit ^= IPS_NAT_MASK;
-
-       if (!(ct->status & statusbit))
-               return 1;
-
-       pr_debug("icmp_reply_translation: translating error %p manip %u "
-                "dir %s\n", skb, manip,
-                dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
-
-       /* Change inner back to look like incoming packet.  We do the
-          opposite manip on this hook to normal, because it might not
-          pass all hooks (locally-generated ICMP).  Consider incoming
-          packet: PREROUTING (DST manip), routing produces ICMP, goes
-          through POSTROUTING (which must correct the DST manip). */
-       if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
-                      &ct->tuplehash[!dir].tuple, !manip))
-               return 0;
+static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
+{
+       struct nf_nat_proto_clean clean = {
+               .l3proto = l3proto,
+               .l4proto = l4proto,
+       };
+       struct net *net;
+
+       rtnl_lock();
+       /* Step 1 - remove from bysource hash */
+       clean.hash = true;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       synchronize_rcu();
 
-       if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               /* Reloading "inside" here since manip_pkt inner. */
-               inside = (void *)skb->data + hdrlen;
-               inside->icmp.checksum = 0;
-               inside->icmp.checksum =
-                       csum_fold(skb_checksum(skb, hdrlen,
-                                              skb->len - hdrlen, 0));
-       }
+       /* Step 2 - clean NAT section */
+       clean.hash = false;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       rtnl_unlock();
+}
 
-       /* Change outer to look the reply to an incoming packet
-        * (proto 0 means don't invert per-proto part). */
-       nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-       if (!manip_pkt(0, skb, 0, &target, manip))
-               return 0;
+static void nf_nat_l3proto_clean(u8 l3proto)
+{
+       struct nf_nat_proto_clean clean = {
+               .l3proto = l3proto,
+       };
+       struct net *net;
+
+       rtnl_lock();
+       /* Step 1 - remove from bysource hash */
+       clean.hash = true;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       synchronize_rcu();
 
-       return 1;
+       /* Step 2 - clean NAT section */
+       clean.hash = false;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
 
 /* Protocol registration. */
-int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
 {
+       const struct nf_nat_l4proto **l4protos;
+       unsigned int i;
        int ret = 0;
 
-       spin_lock_bh(&nf_nat_lock);
+       mutex_lock(&nf_nat_proto_mutex);
+       if (nf_nat_l4protos[l3proto] == NULL) {
+               l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
+                                  GFP_KERNEL);
+               if (l4protos == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               for (i = 0; i < IPPROTO_MAX; i++)
+                       RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);
+
+               /* Before making proto_array visible to lockless readers,
+                * we must make sure its content is committed to memory.
+                */
+               smp_wmb();
+
+               nf_nat_l4protos[l3proto] = l4protos;
+       }
+
        if (rcu_dereference_protected(
-                       nf_nat_protos[proto->protonum],
-                       lockdep_is_held(&nf_nat_lock)
-                       ) != &nf_nat_unknown_protocol) {
+                       nf_nat_l4protos[l3proto][l4proto->l4proto],
+                       lockdep_is_held(&nf_nat_proto_mutex)
+                       ) != &nf_nat_l4proto_unknown) {
                ret = -EBUSY;
                goto out;
        }
-       RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
  out:
-       spin_unlock_bh(&nf_nat_lock);
+       mutex_unlock(&nf_nat_proto_mutex);
        return ret;
 }
-EXPORT_SYMBOL(nf_nat_protocol_register);
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
 
 /* No one stores the protocol anywhere; simply delete it. */
-void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
+void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
 {
-       spin_lock_bh(&nf_nat_lock);
-       RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
-                          &nf_nat_unknown_protocol);
-       spin_unlock_bh(&nf_nat_lock);
+       mutex_lock(&nf_nat_proto_mutex);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
+                        &nf_nat_l4proto_unknown);
+       mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();
+
+       nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
+
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
+{
+       int err;
+
+       err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
+       if (err < 0)
+               return err;
+
+       mutex_lock(&nf_nat_proto_mutex);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
+                        &nf_nat_l4proto_tcp);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
+                        &nf_nat_l4proto_udp);
+       mutex_unlock(&nf_nat_proto_mutex);
+
+       RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);
+
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
+{
+       mutex_lock(&nf_nat_proto_mutex);
+       RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
+       mutex_unlock(&nf_nat_proto_mutex);
+       synchronize_rcu();
+
+       nf_nat_l3proto_clean(l3proto->l3proto);
+       nf_ct_l3proto_module_put(l3proto->l3proto);
 }
-EXPORT_SYMBOL(nf_nat_protocol_unregister);
+EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
 
 /* No one using conntrack by the time this called. */
 static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
@@ -570,34 +676,36 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
 
 static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
-                                    struct nf_nat_ipv4_range *range)
+                                    struct nf_nat_range *range)
 {
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
-       const struct nf_nat_protocol *npt;
+       const struct nf_nat_l4proto *l4proto;
        int err;
 
        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
        if (err < 0)
                return err;
 
-       rcu_read_lock();
-       npt = __nf_nat_proto_find(nf_ct_protonum(ct));
-       if (npt->nlattr_to_range)
-               err = npt->nlattr_to_range(tb, range);
-       rcu_read_unlock();
+       l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       if (l4proto->nlattr_to_range)
+               err = l4proto->nlattr_to_range(tb, range);
+
        return err;
 }
 
 static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
-       [CTA_NAT_MINIP]         = { .type = NLA_U32 },
-       [CTA_NAT_MAXIP]         = { .type = NLA_U32 },
+       [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
+       [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
+       [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
+       [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
 };
 
 static int
 nfnetlink_parse_nat(const struct nlattr *nat,
-                   const struct nf_conn *ct, struct nf_nat_ipv4_range *range)
+                   const struct nf_conn *ct, struct nf_nat_range *range)
 {
+       const struct nf_nat_l3proto *l3proto;
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;
 
@@ -607,25 +715,23 @@ nfnetlink_parse_nat(const struct nlattr *nat,
        if (err < 0)
                return err;
 
-       if (tb[CTA_NAT_MINIP])
-               range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);
-
-       if (!tb[CTA_NAT_MAXIP])
-               range->max_ip = range->min_ip;
-       else
-               range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);
-
-       if (range->min_ip)
-               range->flags |= NF_NAT_RANGE_MAP_IPS;
+       rcu_read_lock();
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       if (l3proto == NULL) {
+               err = -EAGAIN;
+               goto out;
+       }
+       err = l3proto->nlattr_to_range(tb, range);
+       if (err < 0)
+               goto out;
 
        if (!tb[CTA_NAT_PROTO])
-               return 0;
+               goto out;
 
        err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-       if (err < 0)
-               return err;
-
-       return 0;
+out:
+       rcu_read_unlock();
+       return err;
 }
 
 static int
@@ -633,10 +739,12 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
+       int err;
 
-       if (nfnetlink_parse_nat(attr, ct, &range) < 0)
-               return -EINVAL;
+       err = nfnetlink_parse_nat(attr, ct, &range);
+       if (err < 0)
+               return err;
        if (nf_nat_initialized(ct, manip))
                return -EEXIST;
 
@@ -655,30 +763,20 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 static int __net_init nf_nat_net_init(struct net *net)
 {
        /* Leave them the same for the moment. */
-       net->ipv4.nat_htable_size = net->ct.htable_size;
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
-       if (!net->ipv4.nat_bysource)
+       net->ct.nat_htable_size = net->ct.htable_size;
+       net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
+       if (!net->ct.nat_bysource)
                return -ENOMEM;
        return 0;
 }
 
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int clean_nat(struct nf_conn *i, void *data)
-{
-       struct nf_conn_nat *nat = nfct_nat(i);
-
-       if (!nat)
-               return 0;
-       memset(nat, 0, sizeof(*nat));
-       i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
-       return 0;
-}
-
 static void __net_exit nf_nat_net_exit(struct net *net)
 {
-       nf_ct_iterate_cleanup(net, &clean_nat, NULL);
+       struct nf_nat_proto_clean clean = {};
+
+       nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
        synchronize_rcu();
-       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
+       nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
@@ -697,11 +795,8 @@ static struct nfq_ct_nat_hook nfq_ct_nat = {
 
 static int __init nf_nat_init(void)
 {
-       size_t i;
        int ret;
 
-       need_ipv4_conntrack();
-
        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
@@ -712,22 +807,11 @@ static int __init nf_nat_init(void)
        if (ret < 0)
                goto cleanup_extend;
 
-       /* Sew in builtin protocols. */
-       spin_lock_bh(&nf_nat_lock);
-       for (i = 0; i < MAX_IP_NAT_PROTO; i++)
-               RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
-       RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
-       RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
-       RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
-       spin_unlock_bh(&nf_nat_lock);
+       nf_ct_helper_expectfn_register(&follow_master_nat);
 
        /* Initialize fake conntrack so that NAT will skip it */
        nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
 
-       l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
-
-       nf_ct_helper_expectfn_register(&follow_master_nat);
-
        BUG_ON(nf_nat_seq_adjust_hook != NULL);
        RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
        BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -736,6 +820,10 @@ static int __init nf_nat_init(void)
        BUG_ON(nf_ct_nat_offset != NULL);
        RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
        RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
+#ifdef CONFIG_XFRM
+       BUG_ON(nf_nat_decode_session_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
+#endif
        return 0;
 
  cleanup_extend:
@@ -745,19 +833,24 @@ static int __init nf_nat_init(void)
 
 static void __exit nf_nat_cleanup(void)
 {
+       unsigned int i;
+
        unregister_pernet_subsys(&nf_nat_net_ops);
-       nf_ct_l3proto_put(l3proto);
        nf_ct_extend_unregister(&nat_extend);
        nf_ct_helper_expectfn_unregister(&follow_master_nat);
        RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
        RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
        RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
+#ifdef CONFIG_XFRM
+       RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
+#endif
+       for (i = 0; i < NFPROTO_NUMPROTO; i++)
+               kfree(nf_nat_l4protos[i]);
        synchronize_net();
 }
 
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("nf-nat-ipv4");
 
 module_init(nf_nat_init);
 module_exit(nf_nat_cleanup);
similarity index 79%
rename from net/ipv4/netfilter/nf_nat_ftp.c
rename to net/netfilter/nf_nat_ftp.c
index e462a957d0805c324fa3db0bdd8d8a317b629f59..e839b97b2863a052c2c69872f98041114327f152 100644 (file)
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/ip.h>
+#include <linux/inet.h>
 #include <linux/tcp.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <linux/netfilter/nf_conntrack_ftp.h>
@@ -27,22 +26,27 @@ MODULE_ALIAS("ip_nat_ftp");
 
 /* FIXME: Time out? --RR */
 
-static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
+static int nf_nat_ftp_fmt_cmd(struct nf_conn *ct, enum nf_ct_ftp_type type,
                              char *buffer, size_t buflen,
-                             __be32 addr, u16 port)
+                             union nf_inet_addr *addr, u16 port)
 {
        switch (type) {
        case NF_CT_FTP_PORT:
        case NF_CT_FTP_PASV:
                return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
-                               ((unsigned char *)&addr)[0],
-                               ((unsigned char *)&addr)[1],
-                               ((unsigned char *)&addr)[2],
-                               ((unsigned char *)&addr)[3],
+                               ((unsigned char *)&addr->ip)[0],
+                               ((unsigned char *)&addr->ip)[1],
+                               ((unsigned char *)&addr->ip)[2],
+                               ((unsigned char *)&addr->ip)[3],
                                port >> 8,
                                port & 0xFF);
        case NF_CT_FTP_EPRT:
-               return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
+               if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+                       return snprintf(buffer, buflen, "|1|%pI4|%u|",
+                                       &addr->ip, port);
+               else
+                       return snprintf(buffer, buflen, "|2|%pI6|%u|",
+                                       &addr->ip6, port);
        case NF_CT_FTP_EPSV:
                return snprintf(buffer, buflen, "|||%u|", port);
        }
@@ -55,21 +59,22 @@ static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
 static unsigned int nf_nat_ftp(struct sk_buff *skb,
                               enum ip_conntrack_info ctinfo,
                               enum nf_ct_ftp_type type,
+                              unsigned int protoff,
                               unsigned int matchoff,
                               unsigned int matchlen,
                               struct nf_conntrack_expect *exp)
 {
-       __be32 newip;
+       union nf_inet_addr newaddr;
        u_int16_t port;
        int dir = CTINFO2DIR(ctinfo);
        struct nf_conn *ct = exp->master;
-       char buffer[sizeof("|1|255.255.255.255|65535|")];
+       char buffer[sizeof("|1||65535|") + INET6_ADDRSTRLEN];
        unsigned int buflen;
 
        pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
 
        /* Connection will come from wherever this packet goes, hence !dir */
-       newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+       newaddr = ct->tuplehash[!dir].tuple.dst.u3;
        exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
        exp->dir = !dir;
 
@@ -94,13 +99,14 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
        if (port == 0)
                return NF_DROP;
 
-       buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
+       buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer),
+                                   &newaddr, port);
        if (!buflen)
                goto out;
 
        pr_debug("calling nf_nat_mangle_tcp_packet\n");
 
-       if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
+       if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
                                      matchlen, buffer, buflen))
                goto out;
 
similarity index 82%
rename from net/ipv4/netfilter/nf_nat_helper.c
rename to net/netfilter/nf_nat_helper.c
index 2e59ad0b90ca2d572baf2c3243702648710868f9..23c2b38676a6b6bd8cc4e1cd0fd0baa63ccd94e3 100644 (file)
@@ -1,4 +1,4 @@
-/* ip_nat_helper.c - generic support functions for NAT helpers
+/* nf_nat_helper.c - generic support functions for NAT helpers
  *
  * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
  * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
@@ -9,23 +9,19 @@
  */
 #include <linux/module.h>
 #include <linux/gfp.h>
-#include <linux/kmod.h>
 #include <linux/types.h>
-#include <linux/timer.h>
 #include <linux/skbuff.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
-#include <net/checksum.h>
 #include <net/tcp.h>
-#include <net/route.h>
 
-#include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_helper.h>
 
@@ -90,7 +86,6 @@ s16 nf_nat_get_offset(const struct nf_conn *ct,
 
        return offset;
 }
-EXPORT_SYMBOL_GPL(nf_nat_get_offset);
 
 /* Frobs data inside this packet, which is linear. */
 static void mangle_contents(struct sk_buff *skb,
@@ -125,9 +120,13 @@ static void mangle_contents(struct sk_buff *skb,
                __skb_trim(skb, skb->len + rep_len - match_len);
        }
 
-       /* fix IP hdr checksum information */
-       ip_hdr(skb)->tot_len = htons(skb->len);
-       ip_send_check(ip_hdr(skb));
+       if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
+               /* fix IP hdr checksum information */
+               ip_hdr(skb)->tot_len = htons(skb->len);
+               ip_send_check(ip_hdr(skb));
+       } else
+               ipv6_hdr(skb)->payload_len =
+                       htons(skb->len - sizeof(struct ipv6hdr));
 }
 
 /* Unusual, but possible case. */
@@ -166,35 +165,6 @@ void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
 }
 EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
 
-static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
-                       int datalen, __sum16 *check, int oldlen)
-{
-       struct rtable *rt = skb_rtable(skb);
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(rt->rt_flags & RTCF_LOCAL) &&
-                   (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
-                       skb->ip_summed = CHECKSUM_PARTIAL;
-                       skb->csum_start = skb_headroom(skb) +
-                                         skb_network_offset(skb) +
-                                         iph->ihl * 4;
-                       skb->csum_offset = (void *)check - data;
-                       *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                   datalen, iph->protocol, 0);
-               } else {
-                       *check = 0;
-                       *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                  datalen, iph->protocol,
-                                                  csum_partial(data, datalen,
-                                                               0));
-                       if (iph->protocol == IPPROTO_UDP && !*check)
-                               *check = CSUM_MANGLED_0;
-               }
-       } else
-               inet_proto_csum_replace2(check, skb,
-                                        htons(oldlen), htons(datalen), 1);
-}
-
 /* Generic function for mangling variable-length address changes inside
  * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
  * command in FTP).
@@ -206,12 +176,13 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
 int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                               struct nf_conn *ct,
                               enum ip_conntrack_info ctinfo,
+                              unsigned int protoff,
                               unsigned int match_offset,
                               unsigned int match_len,
                               const char *rep_buffer,
                               unsigned int rep_len, bool adjust)
 {
-       struct iphdr *iph;
+       const struct nf_nat_l3proto *l3proto;
        struct tcphdr *tcph;
        int oldlen, datalen;
 
@@ -225,15 +196,17 @@ int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 
        SKB_LINEAR_ASSERT(skb);
 
-       iph = ip_hdr(skb);
-       tcph = (void *)iph + iph->ihl*4;
+       tcph = (void *)skb->data + protoff;
 
-       oldlen = skb->len - iph->ihl*4;
-       mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
+       oldlen = skb->len - protoff;
+       mangle_contents(skb, protoff + tcph->doff*4,
                        match_offset, match_len, rep_buffer, rep_len);
 
-       datalen = skb->len - iph->ihl*4;
-       nf_nat_csum(skb, iph, tcph, datalen, &tcph->check, oldlen);
+       datalen = skb->len - protoff;
+
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check,
+                            datalen, oldlen);
 
        if (adjust && rep_len != match_len)
                nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
@@ -257,12 +230,13 @@ int
 nf_nat_mangle_udp_packet(struct sk_buff *skb,
                         struct nf_conn *ct,
                         enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
                         unsigned int match_offset,
                         unsigned int match_len,
                         const char *rep_buffer,
                         unsigned int rep_len)
 {
-       struct iphdr *iph;
+       const struct nf_nat_l3proto *l3proto;
        struct udphdr *udph;
        int datalen, oldlen;
 
@@ -274,22 +248,23 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
            !enlarge_skb(skb, rep_len - match_len))
                return 0;
 
-       iph = ip_hdr(skb);
-       udph = (void *)iph + iph->ihl*4;
+       udph = (void *)skb->data + protoff;
 
-       oldlen = skb->len - iph->ihl*4;
-       mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
+       oldlen = skb->len - protoff;
+       mangle_contents(skb, protoff + sizeof(*udph),
                        match_offset, match_len, rep_buffer, rep_len);
 
        /* update the length of the UDP packet */
-       datalen = skb->len - iph->ihl*4;
+       datalen = skb->len - protoff;
        udph->len = htons(datalen);
 
        /* fix udp checksum if udp checksum was previously calculated */
        if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
                return 1;
 
-       nf_nat_csum(skb, iph, udph, datalen, &udph->check, oldlen);
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check,
+                            datalen, oldlen);
 
        return 1;
 }
@@ -341,6 +316,7 @@ sack_adjust(struct sk_buff *skb,
 /* TCP SACK sequence number adjustment */
 static inline unsigned int
 nf_nat_sack_adjust(struct sk_buff *skb,
+                  unsigned int protoff,
                   struct tcphdr *tcph,
                   struct nf_conn *ct,
                   enum ip_conntrack_info ctinfo)
@@ -348,8 +324,8 @@ nf_nat_sack_adjust(struct sk_buff *skb,
        unsigned int dir, optoff, optend;
        struct nf_conn_nat *nat = nfct_nat(ct);
 
-       optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
-       optend = ip_hdrlen(skb) + tcph->doff * 4;
+       optoff = protoff + sizeof(struct tcphdr);
+       optend = protoff + tcph->doff * 4;
 
        if (!skb_make_writable(skb, optend))
                return 0;
@@ -387,7 +363,8 @@ nf_nat_sack_adjust(struct sk_buff *skb,
 int
 nf_nat_seq_adjust(struct sk_buff *skb,
                  struct nf_conn *ct,
-                 enum ip_conntrack_info ctinfo)
+                 enum ip_conntrack_info ctinfo,
+                 unsigned int protoff)
 {
        struct tcphdr *tcph;
        int dir;
@@ -401,10 +378,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
        this_way = &nat->seq[dir];
        other_way = &nat->seq[!dir];
 
-       if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
+       if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
                return 0;
 
-       tcph = (void *)skb->data + ip_hdrlen(skb);
+       tcph = (void *)skb->data + protoff;
        if (after(ntohl(tcph->seq), this_way->correction_pos))
                seqoff = this_way->offset_after;
        else
@@ -429,7 +406,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
        tcph->seq = newseq;
        tcph->ack_seq = newack;
 
-       return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
+       return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
 }
 
 /* Setup NAT on this expected conntrack so it follows master. */
@@ -437,22 +414,22 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 void nf_nat_follow_master(struct nf_conn *ct,
                          struct nf_conntrack_expect *exp)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        /* This must be a fresh one. */
        BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = exp->saved_proto;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
+       range.min_proto = range.max_proto = exp->saved_proto;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.src.u3;
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
 EXPORT_SYMBOL(nf_nat_follow_master);
similarity index 89%
rename from net/ipv4/netfilter/nf_nat_irc.c
rename to net/netfilter/nf_nat_irc.c
index 979ae165f4eff97f98bddb7659c70a9b70156d13..1fedee6e7fb6974300aeac0d86c2c360a29b6394 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <linux/netfilter/nf_conntrack_irc.h>
@@ -29,12 +28,12 @@ MODULE_ALIAS("ip_nat_irc");
 
 static unsigned int help(struct sk_buff *skb,
                         enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
                         unsigned int matchoff,
                         unsigned int matchlen,
                         struct nf_conntrack_expect *exp)
 {
        char buffer[sizeof("4294967296 65635")];
-       u_int32_t ip;
        u_int16_t port;
        unsigned int ret;
 
@@ -60,13 +59,8 @@ static unsigned int help(struct sk_buff *skb,
        if (port == 0)
                return NF_DROP;
 
-       ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
-       sprintf(buffer, "%u %u", ip, port);
-       pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
-                buffer, &ip, port);
-
        ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-                                      matchoff, matchlen, buffer,
+                                      protoff, matchoff, matchlen, buffer,
                                       strlen(buffer));
        if (ret != NF_ACCEPT)
                nf_ct_unexpect_related(exp);
similarity index 62%
rename from net/ipv4/netfilter/nf_nat_proto_common.c
rename to net/netfilter/nf_nat_proto_common.c
index 9993bc93e102d562b3f7c12c5dc79f388608454c..9baaf734c1421e4e1334a5aed777af7d8ee9d9d1 100644 (file)
@@ -9,20 +9,18 @@
 
 #include <linux/types.h>
 #include <linux/random.h>
-#include <linux/ip.h>
-
 #include <linux/netfilter.h>
 #include <linux/export.h>
-#include <net/secure_seq.h>
+
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
-bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
-                          enum nf_nat_manip_type maniptype,
-                          const union nf_conntrack_man_proto *min,
-                          const union nf_conntrack_man_proto *max)
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+                            enum nf_nat_manip_type maniptype,
+                            const union nf_conntrack_man_proto *min,
+                            const union nf_conntrack_man_proto *max)
 {
        __be16 port;
 
@@ -34,13 +32,14 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
        return ntohs(port) >= ntohs(min->all) &&
               ntohs(port) <= ntohs(max->all);
 }
-EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_in_range);
 
-void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-                              const struct nf_nat_ipv4_range *range,
-                              enum nf_nat_manip_type maniptype,
-                              const struct nf_conn *ct,
-                              u_int16_t *rover)
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                struct nf_conntrack_tuple *tuple,
+                                const struct nf_nat_range *range,
+                                enum nf_nat_manip_type maniptype,
+                                const struct nf_conn *ct,
+                                u16 *rover)
 {
        unsigned int range_size, min, i;
        __be16 *portptr;
@@ -71,15 +70,14 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                        range_size = 65535 - 1024 + 1;
                }
        } else {
-               min = ntohs(range->min.all);
-               range_size = ntohs(range->max.all) - min + 1;
+               min = ntohs(range->min_proto.all);
+               range_size = ntohs(range->max_proto.all) - min + 1;
        }
 
        if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
-               off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
-                                                maniptype == NF_NAT_MANIP_SRC
-                                                ? tuple->dst.u.all
-                                                : tuple->src.u.all);
+               off = l3proto->secure_port(tuple, maniptype == NF_NAT_MANIP_SRC
+                                                 ? tuple->dst.u.all
+                                                 : tuple->src.u.all);
        else
                off = *rover;
 
@@ -93,22 +91,22 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
        }
        return;
 }
-EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
 
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-                                struct nf_nat_ipv4_range *range)
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+                                  struct nf_nat_range *range)
 {
        if (tb[CTA_PROTONAT_PORT_MIN]) {
-               range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
-               range->max.all = range->min.tcp.port;
+               range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
+               range->max_proto.all = range->min_proto.all;
                range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
        if (tb[CTA_PROTONAT_PORT_MAX]) {
-               range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
+               range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
                range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_nlattr_to_range);
 #endif
similarity index 61%
rename from net/ipv4/netfilter/nf_nat_proto_dccp.c
rename to net/netfilter/nf_nat_proto_dccp.c
index 3f67138d187cb25080900bf75351ed936fdbeeec..c8be2cdac0bffebe77a8f9bb5fef0ac35c6f2de0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DCCP NAT protocol helper
  *
- * Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/ip.h>
 #include <linux/dccp.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
 static u_int16_t dccp_port_rover;
 
 static void
-dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_ipv4_range *range,
+dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                 struct nf_conntrack_tuple *tuple,
+                 const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                 &dccp_port_rover);
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &dccp_port_rover);
 }
 
 static bool
 dccp_manip_pkt(struct sk_buff *skb,
-              unsigned int iphdroff,
+              const struct nf_nat_l3proto *l3proto,
+              unsigned int iphdroff, unsigned int hdroff,
               const struct nf_conntrack_tuple *tuple,
               enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (const void *)(skb->data + iphdroff);
        struct dccp_hdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl * 4;
-       __be32 oldip, newip;
        __be16 *portptr, oldport, newport;
        int hdrsize = 8; /* DCCP connection tracking guarantees this much */
 
@@ -51,17 +50,12 @@ dccp_manip_pkt(struct sk_buff *skb,
        if (!skb_make_writable(skb, hdroff + hdrsize))
                return false;
 
-       iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct dccp_hdr *)(skb->data + hdroff);
 
        if (maniptype == NF_NAT_MANIP_SRC) {
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
                newport = tuple->src.u.dccp.port;
                portptr = &hdr->dccph_sport;
        } else {
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
                newport = tuple->dst.u.dccp.port;
                portptr = &hdr->dccph_dport;
        }
@@ -72,30 +66,46 @@ dccp_manip_pkt(struct sk_buff *skb,
        if (hdrsize < sizeof(*hdr))
                return true;
 
-       inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1);
+       l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
+                            tuple, maniptype);
        inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
                                 0);
        return true;
 }
 
-static const struct nf_nat_protocol nf_nat_protocol_dccp = {
-       .protonum               = IPPROTO_DCCP,
+static const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
+       .l4proto                = IPPROTO_DCCP,
        .manip_pkt              = dccp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = dccp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
 
 static int __init nf_nat_proto_dccp_init(void)
 {
-       return nf_nat_protocol_register(&nf_nat_protocol_dccp);
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
+err1:
+       return err;
 }
 
 static void __exit nf_nat_proto_dccp_fini(void)
 {
-       nf_nat_protocol_unregister(&nf_nat_protocol_dccp);
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
+
 }
 
 module_init(nf_nat_proto_dccp_init);
similarity index 61%
rename from net/ipv4/netfilter/nf_nat_proto_sctp.c
rename to net/netfilter/nf_nat_proto_sctp.c
index 3cce9b6c1c293c6c539cee0c3feef0c1930e2181..e64faa5ca89314ac0d3dadef64366b70b60c3cdc 100644 (file)
@@ -8,53 +8,46 @@
 
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/ip.h>
 #include <linux/sctp.h>
 #include <linux/module.h>
 #include <net/sctp/checksum.h>
 
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
 static u_int16_t nf_sctp_port_rover;
 
 static void
-sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_ipv4_range *range,
+sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                 struct nf_conntrack_tuple *tuple,
+                 const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                 &nf_sctp_port_rover);
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &nf_sctp_port_rover);
 }
 
 static bool
 sctp_manip_pkt(struct sk_buff *skb,
-              unsigned int iphdroff,
+              const struct nf_nat_l3proto *l3proto,
+              unsigned int iphdroff, unsigned int hdroff,
               const struct nf_conntrack_tuple *tuple,
               enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        struct sk_buff *frag;
        sctp_sctphdr_t *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
        __be32 crc32;
 
        if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
                return false;
 
-       iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct sctphdr *)(skb->data + hdroff);
 
        if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
+               /* Get rid of src port */
                hdr->source = tuple->src.u.sctp.port;
        } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
+               /* Get rid of dst port */
                hdr->dest = tuple->dst.u.sctp.port;
        }
 
@@ -68,24 +61,38 @@ sctp_manip_pkt(struct sk_buff *skb,
        return true;
 }
 
-static const struct nf_nat_protocol nf_nat_protocol_sctp = {
-       .protonum               = IPPROTO_SCTP,
+static const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
+       .l4proto                = IPPROTO_SCTP,
        .manip_pkt              = sctp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = sctp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
 
 static int __init nf_nat_proto_sctp_init(void)
 {
-       return nf_nat_protocol_register(&nf_nat_protocol_sctp);
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_sctp);
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
+err1:
+       return err;
 }
 
 static void __exit nf_nat_proto_sctp_exit(void)
 {
-       nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_sctp);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
 }
 
 module_init(nf_nat_proto_sctp_init);
similarity index 65%
rename from net/ipv4/netfilter/nf_nat_proto_tcp.c
rename to net/netfilter/nf_nat_proto_tcp.c
index 9fb4b4e72bbfeea34f894eeacbac20da9e01a396..83ec8a6e4c36775f3041cedf31419d937ac077bb 100644 (file)
@@ -9,37 +9,36 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/export.h>
-#include <linux/ip.h>
 #include <linux/tcp.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <net/netfilter/nf_nat_core.h>
 
-static u_int16_t tcp_port_rover;
+static u16 tcp_port_rover;
 
 static void
-tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_ipv4_range *range,
+tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                struct nf_conntrack_tuple *tuple,
+                const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover);
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &tcp_port_rover);
 }
 
 static bool
 tcp_manip_pkt(struct sk_buff *skb,
-             unsigned int iphdroff,
+             const struct nf_nat_l3proto *l3proto,
+             unsigned int iphdroff, unsigned int hdroff,
              const struct nf_conntrack_tuple *tuple,
              enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        struct tcphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
        __be16 *portptr, newport, oldport;
        int hdrsize = 8; /* TCP connection tracking guarantees this much */
 
@@ -52,19 +51,14 @@ tcp_manip_pkt(struct sk_buff *skb,
        if (!skb_make_writable(skb, hdroff + hdrsize))
                return false;
 
-       iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct tcphdr *)(skb->data + hdroff);
 
        if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
+               /* Get rid of src port */
                newport = tuple->src.u.tcp.port;
                portptr = &hdr->source;
        } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
+               /* Get rid of dst port */
                newport = tuple->dst.u.tcp.port;
                portptr = &hdr->dest;
        }
@@ -75,17 +69,17 @@ tcp_manip_pkt(struct sk_buff *skb,
        if (hdrsize < sizeof(*hdr))
                return true;
 
-       inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
+       l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
        inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
        return true;
 }
 
-const struct nf_nat_protocol nf_nat_protocol_tcp = {
-       .protonum               = IPPROTO_TCP,
+const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
+       .l4proto                = IPPROTO_TCP,
        .manip_pkt              = tcp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = tcp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
similarity index 60%
rename from net/ipv4/netfilter/nf_nat_proto_udp.c
rename to net/netfilter/nf_nat_proto_udp.c
index 9883336e628fd2174ec7b272ed55481a2a8b847d..7df613fb34a23d29af3b5d1775182ae38e86ded3 100644 (file)
@@ -9,59 +9,53 @@
 #include <linux/types.h>
 #include <linux/export.h>
 #include <linux/init.h>
-#include <linux/ip.h>
 #include <linux/udp.h>
 
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
-static u_int16_t udp_port_rover;
+static u16 udp_port_rover;
 
 static void
-udp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_ipv4_range *range,
+udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                struct nf_conntrack_tuple *tuple,
+                const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover);
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &udp_port_rover);
 }
 
 static bool
 udp_manip_pkt(struct sk_buff *skb,
-             unsigned int iphdroff,
+             const struct nf_nat_l3proto *l3proto,
+             unsigned int iphdroff, unsigned int hdroff,
              const struct nf_conntrack_tuple *tuple,
              enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        struct udphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
        __be16 *portptr, newport;
 
        if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
                return false;
-
-       iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct udphdr *)(skb->data + hdroff);
 
        if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
+               /* Get rid of src port */
                newport = tuple->src.u.udp.port;
                portptr = &hdr->source;
        } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
+               /* Get rid of dst port */
                newport = tuple->dst.u.udp.port;
                portptr = &hdr->dest;
        }
        if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
-               inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
+               l3proto->csum_update(skb, iphdroff, &hdr->check,
+                                    tuple, maniptype);
                inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
                                         0);
                if (!hdr->check)
@@ -71,12 +65,12 @@ udp_manip_pkt(struct sk_buff *skb,
        return true;
 }
 
-const struct nf_nat_protocol nf_nat_protocol_udp = {
-       .protonum               = IPPROTO_UDP,
+const struct nf_nat_l4proto nf_nat_l4proto_udp = {
+       .l4proto                = IPPROTO_UDP,
        .manip_pkt              = udp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = udp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
similarity index 58%
rename from net/ipv4/netfilter/nf_nat_proto_udplite.c
rename to net/netfilter/nf_nat_proto_udplite.c
index d24d10a7beb2ac50bb16bbe7968e8c7b567bbb34..776a0d1317b16df837b5017fc2020e99a222b167 100644 (file)
@@ -9,59 +9,53 @@
 
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/ip.h>
 #include <linux/udp.h>
 
 #include <linux/netfilter.h>
 #include <linux/module.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
-static u_int16_t udplite_port_rover;
+static u16 udplite_port_rover;
 
 static void
-udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
-                    const struct nf_nat_ipv4_range *range,
+udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                    struct nf_conntrack_tuple *tuple,
+                    const struct nf_nat_range *range,
                     enum nf_nat_manip_type maniptype,
                     const struct nf_conn *ct)
 {
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                 &udplite_port_rover);
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &udplite_port_rover);
 }
 
 static bool
 udplite_manip_pkt(struct sk_buff *skb,
-                 unsigned int iphdroff,
+                 const struct nf_nat_l3proto *l3proto,
+                 unsigned int iphdroff, unsigned int hdroff,
                  const struct nf_conntrack_tuple *tuple,
                  enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        struct udphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
        __be16 *portptr, newport;
 
        if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
                return false;
 
-       iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct udphdr *)(skb->data + hdroff);
 
        if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
+               /* Get rid of source port */
                newport = tuple->src.u.udp.port;
                portptr = &hdr->source;
        } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
+               /* Get rid of dst port */
                newport = tuple->dst.u.udp.port;
                portptr = &hdr->dest;
        }
 
-       inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
+       l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
        inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
        if (!hdr->check)
                hdr->check = CSUM_MANGLED_0;
@@ -70,24 +64,38 @@ udplite_manip_pkt(struct sk_buff *skb,
        return true;
 }
 
-static const struct nf_nat_protocol nf_nat_protocol_udplite = {
-       .protonum               = IPPROTO_UDPLITE,
+static const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
+       .l4proto                = IPPROTO_UDPLITE,
        .manip_pkt              = udplite_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = udplite_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
 
 static int __init nf_nat_proto_udplite_init(void)
 {
-       return nf_nat_protocol_register(&nf_nat_protocol_udplite);
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_udplite);
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
+err1:
+       return err;
 }
 
 static void __exit nf_nat_proto_udplite_fini(void)
 {
-       nf_nat_protocol_unregister(&nf_nat_protocol_udplite);
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_udplite);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
 }
 
 module_init(nf_nat_proto_udplite_init);
similarity index 76%
rename from net/ipv4/netfilter/nf_nat_proto_unknown.c
rename to net/netfilter/nf_nat_proto_unknown.c
index e0afe8112b1c20bccd18ad20dd1634d0e64f240d..6e494d5844128077dadc9f1cf1c44f2f039043f0 100644 (file)
@@ -15,8 +15,7 @@
 
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
 static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
                             enum nf_nat_manip_type manip_type,
@@ -26,26 +25,29 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
        return true;
 }
 
-static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
-                                const struct nf_nat_ipv4_range *range,
+static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                struct nf_conntrack_tuple *tuple,
+                                const struct nf_nat_range *range,
                                 enum nf_nat_manip_type maniptype,
                                 const struct nf_conn *ct)
 {
        /* Sorry: we can't help you; if it's not unique, we can't frob
-          anything. */
+        * anything.
+        */
        return;
 }
 
 static bool
 unknown_manip_pkt(struct sk_buff *skb,
-                 unsigned int iphdroff,
+                 const struct nf_nat_l3proto *l3proto,
+                 unsigned int iphdroff, unsigned int hdroff,
                  const struct nf_conntrack_tuple *tuple,
                  enum nf_nat_manip_type maniptype)
 {
        return true;
 }
 
-const struct nf_nat_protocol nf_nat_unknown_protocol = {
+const struct nf_nat_l4proto nf_nat_l4proto_unknown = {
        .manip_pkt              = unknown_manip_pkt,
        .in_range               = unknown_in_range,
        .unique_tuple           = unknown_unique_tuple,
similarity index 62%
rename from net/ipv4/netfilter/nf_nat_sip.c
rename to net/netfilter/nf_nat_sip.c
index 9c87cde28ff831472cc7072a05526d753727d1fa..16303c752213119fa9f2c19adddc73bf191d9d68 100644 (file)
@@ -3,7 +3,7 @@
  * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
  * based on RR's ip_nat_ftp.c and other modules.
  * (C) 2007 United Security Providers
- * (C) 2007, 2008 Patrick McHardy <kaber@trash.net>
+ * (C) 2007, 2008, 2011, 2012 Patrick McHardy <kaber@trash.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/ip.h>
+#include <linux/inet.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
 
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <linux/netfilter/nf_conntrack_sip.h>
@@ -30,7 +28,8 @@ MODULE_DESCRIPTION("SIP NAT helper");
 MODULE_ALIAS("ip_nat_sip");
 
 
-static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int mangle_packet(struct sk_buff *skb, unsigned int protoff,
+                                 unsigned int dataoff,
                                  const char **dptr, unsigned int *datalen,
                                  unsigned int matchoff, unsigned int matchlen,
                                  const char *buffer, unsigned int buflen)
@@ -41,20 +40,20 @@ static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
        unsigned int baseoff;
 
        if (nf_ct_protonum(ct) == IPPROTO_TCP) {
-               th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
-               baseoff = ip_hdrlen(skb) + th->doff * 4;
+               th = (struct tcphdr *)(skb->data + protoff);
+               baseoff = protoff + th->doff * 4;
                matchoff += dataoff - baseoff;
 
                if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
-                                               matchoff, matchlen,
+                                               protoff, matchoff, matchlen,
                                                buffer, buflen, false))
                        return 0;
        } else {
-               baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
+               baseoff = protoff + sizeof(struct udphdr);
                matchoff += dataoff - baseoff;
 
                if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
-                                             matchoff, matchlen,
+                                             protoff, matchoff, matchlen,
                                              buffer, buflen))
                        return 0;
        }
@@ -65,7 +64,30 @@ static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
        return 1;
 }
 
-static int map_addr(struct sk_buff *skb, unsigned int dataoff,
+static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer,
+                           const union nf_inet_addr *addr, bool delim)
+{
+       if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+               return sprintf(buffer, "%pI4", &addr->ip);
+       else {
+               if (delim)
+                       return sprintf(buffer, "[%pI6c]", &addr->ip6);
+               else
+                       return sprintf(buffer, "%pI6c", &addr->ip6);
+       }
+}
+
+static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer,
+                                const union nf_inet_addr *addr, u16 port)
+{
+       if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+               return sprintf(buffer, "%pI4:%u", &addr->ip, port);
+       else
+               return sprintf(buffer, "[%pI6c]:%u", &addr->ip6, port);
+}
+
+static int map_addr(struct sk_buff *skb, unsigned int protoff,
+                   unsigned int dataoff,
                    const char **dptr, unsigned int *datalen,
                    unsigned int matchoff, unsigned int matchlen,
                    union nf_inet_addr *addr, __be16 port)
@@ -73,32 +95,32 @@ static int map_addr(struct sk_buff *skb, unsigned int dataoff,
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+       char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
        unsigned int buflen;
-       __be32 newaddr;
+       union nf_inet_addr newaddr;
        __be16 newport;
 
-       if (ct->tuplehash[dir].tuple.src.u3.ip == addr->ip &&
+       if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, addr) &&
            ct->tuplehash[dir].tuple.src.u.udp.port == port) {
-               newaddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+               newaddr = ct->tuplehash[!dir].tuple.dst.u3;
                newport = ct->tuplehash[!dir].tuple.dst.u.udp.port;
-       } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip &&
+       } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
                   ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
-               newaddr = ct->tuplehash[!dir].tuple.src.u3.ip;
+               newaddr = ct->tuplehash[!dir].tuple.src.u3;
                newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
        } else
                return 1;
 
-       if (newaddr == addr->ip && newport == port)
+       if (nf_inet_addr_cmp(&newaddr, addr) && newport == port)
                return 1;
 
-       buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
-
-       return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                            buffer, buflen);
+       buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport));
+       return mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                            matchoff, matchlen, buffer, buflen);
 }
 
-static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
+static int map_sip_addr(struct sk_buff *skb, unsigned int protoff,
+                       unsigned int dataoff,
                        const char **dptr, unsigned int *datalen,
                        enum sip_header_types type)
 {
@@ -111,11 +133,12 @@ static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
        if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
                                    &matchoff, &matchlen, &addr, &port) <= 0)
                return 1;
-       return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                       &addr, port);
+       return map_addr(skb, protoff, dataoff, dptr, datalen,
+                       matchoff, matchlen, &addr, port);
 }
 
-static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
+                              unsigned int dataoff,
                               const char **dptr, unsigned int *datalen)
 {
        enum ip_conntrack_info ctinfo;
@@ -132,8 +155,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
                if (ct_sip_parse_request(ct, *dptr, *datalen,
                                         &matchoff, &matchlen,
                                         &addr, &port) > 0 &&
-                   !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                             &addr, port))
+                   !map_addr(skb, protoff, dataoff, dptr, datalen,
+                             matchoff, matchlen, &addr, port))
                        return NF_DROP;
                request = 1;
        } else
@@ -149,23 +172,25 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
                                    hdr, NULL, &matchoff, &matchlen,
                                    &addr, &port) > 0) {
                unsigned int olen, matchend, poff, plen, buflen, n;
-               char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+               char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
 
                /* We're only interested in headers related to this
                 * connection */
                if (request) {
-                       if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip ||
+                       if (!nf_inet_addr_cmp(&addr,
+                                       &ct->tuplehash[dir].tuple.src.u3) ||
                            port != ct->tuplehash[dir].tuple.src.u.udp.port)
                                goto next;
                } else {
-                       if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip ||
+                       if (!nf_inet_addr_cmp(&addr,
+                                       &ct->tuplehash[dir].tuple.dst.u3) ||
                            port != ct->tuplehash[dir].tuple.dst.u.udp.port)
                                goto next;
                }
 
                olen = *datalen;
-               if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                             &addr, port))
+               if (!map_addr(skb, protoff, dataoff, dptr, datalen,
+                             matchoff, matchlen, &addr, port))
                        return NF_DROP;
 
                matchend = matchoff + matchlen + *datalen - olen;
@@ -175,11 +200,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
                if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
                                               "maddr=", &poff, &plen,
                                               &addr, true) > 0 &&
-                   addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
-                   addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
-                       buflen = sprintf(buffer, "%pI4",
-                                       &ct->tuplehash[!dir].tuple.dst.u3.ip);
-                       if (!mangle_packet(skb, dataoff, dptr, datalen,
+                   nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) &&
+                   !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3)) {
+                       buflen = sip_sprintf_addr(ct, buffer,
+                                       &ct->tuplehash[!dir].tuple.dst.u3,
+                                       true);
+                       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
                                           poff, plen, buffer, buflen))
                                return NF_DROP;
                }
@@ -189,11 +215,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
                if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
                                               "received=", &poff, &plen,
                                               &addr, false) > 0 &&
-                   addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
-                   addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
-                       buflen = sprintf(buffer, "%pI4",
-                                       &ct->tuplehash[!dir].tuple.src.u3.ip);
-                       if (!mangle_packet(skb, dataoff, dptr, datalen,
+                   nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.dst.u3) &&
+                   !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.src.u3)) {
+                       buflen = sip_sprintf_addr(ct, buffer,
+                                       &ct->tuplehash[!dir].tuple.src.u3,
+                                       false);
+                       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
                                           poff, plen, buffer, buflen))
                                return NF_DROP;
                }
@@ -207,7 +234,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
                    htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
                        __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
                        buflen = sprintf(buffer, "%u", ntohs(p));
-                       if (!mangle_packet(skb, dataoff, dptr, datalen,
+                       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
                                           poff, plen, buffer, buflen))
                                return NF_DROP;
                }
@@ -221,19 +248,21 @@ next:
                                       SIP_HDR_CONTACT, &in_header,
                                       &matchoff, &matchlen,
                                       &addr, &port) > 0) {
-               if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
+               if (!map_addr(skb, protoff, dataoff, dptr, datalen,
+                             matchoff, matchlen,
                              &addr, port))
                        return NF_DROP;
        }
 
-       if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
-           !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
+       if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) ||
+           !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO))
                return NF_DROP;
 
        return NF_ACCEPT;
 }
 
-static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
+static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
+                                 s16 off)
 {
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
@@ -242,37 +271,38 @@ static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
        if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
                return;
 
-       th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
+       th = (struct tcphdr *)(skb->data + protoff);
        nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
 }
 
 /* Handles expected signalling connections and media streams */
-static void ip_nat_sip_expected(struct nf_conn *ct,
+static void nf_nat_sip_expected(struct nf_conn *ct,
                                struct nf_conntrack_expect *exp)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        /* This must be a fresh one. */
        BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = exp->saved_proto;
-       range.min_ip = range.max_ip = exp->saved_ip;
+       range.min_proto = range.max_proto = exp->saved_proto;
+       range.min_addr = range.max_addr = exp->saved_addr;
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 
        /* Change src to where master sends to, but only if the connection
         * actually came from the same source. */
-       if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
-           ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
+       if (nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
+                            &ct->master->tuplehash[exp->dir].tuple.src.u3)) {
                range.flags = NF_NAT_RANGE_MAP_IPS;
-               range.min_ip = range.max_ip
-                       = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+               range.min_addr = range.max_addr
+                       = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
                nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
        }
 }
 
-static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
+                                     unsigned int dataoff,
                                      const char **dptr, unsigned int *datalen,
                                      struct nf_conntrack_expect *exp,
                                      unsigned int matchoff,
@@ -281,16 +311,17 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       __be32 newip;
+       union nf_inet_addr newaddr;
        u_int16_t port;
-       char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+       char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
        unsigned int buflen;
 
        /* Connection will come from reply */
-       if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
-               newip = exp->tuple.dst.u3.ip;
+       if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                            &ct->tuplehash[!dir].tuple.dst.u3))
+               newaddr = exp->tuple.dst.u3;
        else
-               newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+               newaddr = ct->tuplehash[!dir].tuple.dst.u3;
 
        /* If the signalling port matches the connection's source port in the
         * original direction, try to use the destination port in the opposite
@@ -301,11 +332,11 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
        else
                port = ntohs(exp->tuple.dst.u.udp.port);
 
-       exp->saved_ip = exp->tuple.dst.u3.ip;
-       exp->tuple.dst.u3.ip = newip;
+       exp->saved_addr = exp->tuple.dst.u3;
+       exp->tuple.dst.u3 = newaddr;
        exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
        exp->dir = !dir;
-       exp->expectfn = ip_nat_sip_expected;
+       exp->expectfn = nf_nat_sip_expected;
 
        for (; port != 0; port++) {
                int ret;
@@ -323,10 +354,10 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
        if (port == 0)
                return NF_DROP;
 
-       if (exp->tuple.dst.u3.ip != exp->saved_ip ||
+       if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) ||
            exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
-               buflen = sprintf(buffer, "%pI4:%u", &newip, port);
-               if (!mangle_packet(skb, dataoff, dptr, datalen,
+               buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port);
+               if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
                                   matchoff, matchlen, buffer, buflen))
                        goto err;
        }
@@ -337,7 +368,8 @@ err:
        return NF_DROP;
 }
 
-static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
+static int mangle_content_len(struct sk_buff *skb, unsigned int protoff,
+                             unsigned int dataoff,
                              const char **dptr, unsigned int *datalen)
 {
        enum ip_conntrack_info ctinfo;
@@ -359,11 +391,12 @@ static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
                return 0;
 
        buflen = sprintf(buffer, "%u", c_len);
-       return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                            buffer, buflen);
+       return mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                            matchoff, matchlen, buffer, buflen);
 }
 
-static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
+static int mangle_sdp_packet(struct sk_buff *skb, unsigned int protoff,
+                            unsigned int dataoff,
                             const char **dptr, unsigned int *datalen,
                             unsigned int sdpoff,
                             enum sdp_header_types type,
@@ -377,29 +410,33 @@ static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
        if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
                                  &matchoff, &matchlen) <= 0)
                return -ENOENT;
-       return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                            buffer, buflen) ? 0 : -EINVAL;
+       return mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                            matchoff, matchlen, buffer, buflen) ? 0 : -EINVAL;
 }
 
-static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int nf_nat_sdp_addr(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int sdpoff,
                                    enum sdp_header_types type,
                                    enum sdp_header_types term,
                                    const union nf_inet_addr *addr)
 {
-       char buffer[sizeof("nnn.nnn.nnn.nnn")];
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       char buffer[INET6_ADDRSTRLEN];
        unsigned int buflen;
 
-       buflen = sprintf(buffer, "%pI4", &addr->ip);
-       if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
-                             buffer, buflen))
+       buflen = sip_sprintf_addr(ct, buffer, addr, false);
+       if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen,
+                             sdpoff, type, term, buffer, buflen))
                return 0;
 
-       return mangle_content_len(skb, dataoff, dptr, datalen);
+       return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
 }
 
-static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int nf_nat_sdp_port(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int matchoff,
                                    unsigned int matchlen,
@@ -409,30 +446,32 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
        unsigned int buflen;
 
        buflen = sprintf(buffer, "%u", port);
-       if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                          buffer, buflen))
+       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                          matchoff, matchlen, buffer, buflen))
                return 0;
 
-       return mangle_content_len(skb, dataoff, dptr, datalen);
+       return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
 }
 
-static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int nf_nat_sdp_session(struct sk_buff *skb, unsigned int protoff,
+                                      unsigned int dataoff,
                                       const char **dptr, unsigned int *datalen,
                                       unsigned int sdpoff,
                                       const union nf_inet_addr *addr)
 {
-       char buffer[sizeof("nnn.nnn.nnn.nnn")];
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       char buffer[INET6_ADDRSTRLEN];
        unsigned int buflen;
 
        /* Mangle session description owner and contact addresses */
-       buflen = sprintf(buffer, "%pI4", &addr->ip);
-       if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
-                              SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
-                              buffer, buflen))
+       buflen = sip_sprintf_addr(ct, buffer, addr, false);
+       if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
+                             SDP_HDR_OWNER, SDP_HDR_MEDIA, buffer, buflen))
                return 0;
 
-       switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
-                                 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
+       switch (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
+                                 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                  buffer, buflen)) {
        case 0:
        /*
@@ -448,12 +487,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff
                return 0;
        }
 
-       return mangle_content_len(skb, dataoff, dptr, datalen);
+       return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
 }
 
 /* So, this packet has hit the connection tracking matching code.
    Mangle it, and change the expectation to match the new version. */
-static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
+static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr, unsigned int *datalen,
                                     struct nf_conntrack_expect *rtp_exp,
                                     struct nf_conntrack_expect *rtcp_exp,
@@ -467,23 +507,23 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
        u_int16_t port;
 
        /* Connection will come from reply */
-       if (ct->tuplehash[dir].tuple.src.u3.ip ==
-           ct->tuplehash[!dir].tuple.dst.u3.ip)
-               rtp_addr->ip = rtp_exp->tuple.dst.u3.ip;
+       if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                            &ct->tuplehash[!dir].tuple.dst.u3))
+               *rtp_addr = rtp_exp->tuple.dst.u3;
        else
-               rtp_addr->ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+               *rtp_addr = ct->tuplehash[!dir].tuple.dst.u3;
 
-       rtp_exp->saved_ip = rtp_exp->tuple.dst.u3.ip;
-       rtp_exp->tuple.dst.u3.ip = rtp_addr->ip;
+       rtp_exp->saved_addr = rtp_exp->tuple.dst.u3;
+       rtp_exp->tuple.dst.u3 = *rtp_addr;
        rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
        rtp_exp->dir = !dir;
-       rtp_exp->expectfn = ip_nat_sip_expected;
+       rtp_exp->expectfn = nf_nat_sip_expected;
 
-       rtcp_exp->saved_ip = rtcp_exp->tuple.dst.u3.ip;
-       rtcp_exp->tuple.dst.u3.ip = rtp_addr->ip;
+       rtcp_exp->saved_addr = rtcp_exp->tuple.dst.u3;
+       rtcp_exp->tuple.dst.u3 = *rtp_addr;
        rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
        rtcp_exp->dir = !dir;
-       rtcp_exp->expectfn = ip_nat_sip_expected;
+       rtcp_exp->expectfn = nf_nat_sip_expected;
 
        /* Try to get same pair of ports: if not, try to change them. */
        for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
@@ -517,7 +557,7 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
 
        /* Update media port. */
        if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
-           !ip_nat_sdp_port(skb, dataoff, dptr, datalen,
+           !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
                             mediaoff, medialen, port))
                goto err2;
 
@@ -531,8 +571,8 @@ err1:
 }
 
 static struct nf_ct_helper_expectfn sip_nat = {
-        .name           = "sip",
-        .expectfn       = ip_nat_sip_expected,
+       .name           = "sip",
+       .expectfn       = nf_nat_sip_expected,
 };
 
 static void __exit nf_nat_sip_fini(void)
@@ -557,13 +597,13 @@ static int __init nf_nat_sip_init(void)
        BUG_ON(nf_nat_sdp_port_hook != NULL);
        BUG_ON(nf_nat_sdp_session_hook != NULL);
        BUG_ON(nf_nat_sdp_media_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
+       RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
+       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
+       RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
+       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
+       RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
+       RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
+       RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
        nf_ct_helper_expectfn_register(&sip_nat);
        return 0;
 }
similarity index 97%
rename from net/ipv4/netfilter/nf_nat_tftp.c
rename to net/netfilter/nf_nat_tftp.c
index 9dbb8d284f992ac8e16cd2145eeedff5f15cf48d..ccabbda71a3e5ca0e893dc03bdd48816d4560503 100644 (file)
@@ -11,7 +11,6 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <linux/netfilter/nf_conntrack_tftp.h>
 
 MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
index ce60cf0f6c11a49d9d8bc8bf5c8918d10dbfb316..8d2cf9ec37a850951648640728ab1ad6d8f53e6d 100644 (file)
@@ -118,7 +118,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
  * through nf_reinject().
  */
 static int __nf_queue(struct sk_buff *skb,
-                     struct list_head *elem,
+                     struct nf_hook_ops *elem,
                      u_int8_t pf, unsigned int hook,
                      struct net_device *indev,
                      struct net_device *outdev,
@@ -155,7 +155,7 @@ static int __nf_queue(struct sk_buff *skb,
 
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
-               .elem   = list_entry(elem, struct nf_hook_ops, list),
+               .elem   = elem,
                .pf     = pf,
                .hook   = hook,
                .indev  = indev,
@@ -225,7 +225,7 @@ static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 #endif
 
 int nf_queue(struct sk_buff *skb,
-            struct list_head *elem,
+            struct nf_hook_ops *elem,
             u_int8_t pf, unsigned int hook,
             struct net_device *indev,
             struct net_device *outdev,
@@ -287,7 +287,7 @@ int nf_queue(struct sk_buff *skb,
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
        struct sk_buff *skb = entry->skb;
-       struct list_head *elem = &entry->elem->list;
+       struct nf_hook_ops *elem = entry->elem;
        const struct nf_afinfo *afinfo;
        int err;
 
@@ -297,7 +297,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
-               elem = elem->prev;
+               elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
                verdict = NF_ACCEPT;
        }
 
index a26503342e7184737c419ddd36144dacebc59d20..ffb92c03a358a8ce64c9e824a09db72a823b76d8 100644 (file)
@@ -241,7 +241,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
 #endif
        };
 
-       nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg);
+       nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
        if (!nfnl)
                return -ENOMEM;
        net->nfnl_stash = nfnl;
index b2e7310ca0b8e05d9835c4898f9670993d5ebed3..589d686f0b4cbe0f25b785790dfeba9bf1a13d98 100644 (file)
@@ -79,11 +79,11 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
 
        if (tb[NFACCT_BYTES]) {
                atomic64_set(&nfacct->bytes,
-                            be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+                            be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
        }
        if (tb[NFACCT_PKTS]) {
                atomic64_set(&nfacct->pkts,
-                            be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+                            be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
        }
        atomic_set(&nfacct->refcnt, 1);
        list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
@@ -91,16 +91,16 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
 }
 
 static int
-nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                   int event, struct nf_acct *acct)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
        u64 pkts, bytes;
 
        event |= NFNL_SUBSYS_ACCT << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -150,7 +150,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (last && cur != last)
                        continue;
 
-               if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid,
+               if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq,
                                       NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                       NFNL_MSG_ACCT_NEW, cur) < 0) {
@@ -195,7 +195,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
                        break;
                }
 
-               ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid,
+               ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).portid,
                                         nlh->nlmsg_seq,
                                         NFNL_MSG_TYPE(nlh->nlmsg_type),
                                         NFNL_MSG_ACCT_NEW, cur);
@@ -203,7 +203,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
                        kfree_skb(skb2);
                        break;
                }
-               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
                                        MSG_DONTWAIT);
                if (ret > 0)
                        ret = 0;
index d6836193d479a00b155884a259a6d1f2f0245a4c..945950a8b1f11b50bccd440006b3f4afc1be2bee 100644 (file)
@@ -74,7 +74,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
        if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
                return -EINVAL;
 
-       tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+       tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
        tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
 
        return 0;
@@ -85,6 +85,9 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
 {
        const struct nf_conn_help *help = nfct_help(ct);
 
+       if (attr == NULL)
+               return -EINVAL;
+
        if (help->helper->data_len == 0)
                return -EINVAL;
 
@@ -395,16 +398,16 @@ nla_put_failure:
 }
 
 static int
-nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                        int event, struct nf_conntrack_helper *helper)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
        int status;
 
        event |= NFNL_SUBSYS_CTHELPER << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -468,7 +471,7 @@ restart:
                                cb->args[1] = 0;
                        }
                        if (nfnl_cthelper_fill_info(skb,
-                                           NETLINK_CB(cb->skb).pid,
+                                           NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                            NFNL_MSG_CTHELPER_NEW, cur) < 0) {
@@ -538,7 +541,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
                                break;
                        }
 
-                       ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid,
+                       ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
                                                nlh->nlmsg_seq,
                                                NFNL_MSG_TYPE(nlh->nlmsg_type),
                                                NFNL_MSG_CTHELPER_NEW, cur);
@@ -547,7 +550,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
                                break;
                        }
 
-                       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+                       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
                                                MSG_DONTWAIT);
                        if (ret > 0)
                                ret = 0;
index cdecbc8fe965e9ed66216e1702fd1b43fe569091..8847b4d8be06b9ad536c2bb32d9bbd3d34a7c289 100644 (file)
@@ -155,16 +155,16 @@ err_proto_put:
 }
 
 static int
-ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                       int event, struct ctnl_timeout *timeout)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
        struct nf_conntrack_l4proto *l4proto = timeout->l4proto;
 
        event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -222,7 +222,7 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (last && cur != last)
                        continue;
 
-               if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
+               if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq,
                                           NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                           IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
@@ -268,7 +268,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
                        break;
                }
 
-               ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
+               ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).portid,
                                             nlh->nlmsg_seq,
                                             NFNL_MSG_TYPE(nlh->nlmsg_type),
                                             IPCTNL_MSG_TIMEOUT_NEW, cur);
@@ -276,7 +276,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
                        kfree_skb(skb2);
                        break;
                }
-               ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
+               ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
                                        MSG_DONTWAIT);
                if (ret > 0)
                        ret = 0;
index 8cfc401e197e83092b12d4c9b6395889e4752239..9f199f2e31fae16ff667e7a1f230e6826d015794 100644 (file)
@@ -56,7 +56,7 @@ struct nfulnl_instance {
        struct sk_buff *skb;            /* pre-allocatd skb */
        struct timer_list timer;
        struct user_namespace *peer_user_ns;    /* User namespace of the peer process */
-       int peer_pid;                   /* PID of the peer process */
+       int peer_portid;                        /* PORTID of the peer process */
 
        /* configurable parameters */
        unsigned int flushtimeout;      /* timeout until queue flush */
@@ -133,7 +133,7 @@ instance_put(struct nfulnl_instance *inst)
 static void nfulnl_timer(unsigned long data);
 
 static struct nfulnl_instance *
-instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
+instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
 {
        struct nfulnl_instance *inst;
        int err;
@@ -164,7 +164,7 @@ instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
        inst->peer_user_ns = user_ns;
-       inst->peer_pid = pid;
+       inst->peer_portid = portid;
        inst->group_num = group_num;
 
        inst->qthreshold        = NFULNL_QTHRESH_DEFAULT;
@@ -336,7 +336,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
                if (!nlh)
                        goto out;
        }
-       status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
+       status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_portid,
                                   MSG_DONTWAIT);
 
        inst->qlen = 0;
@@ -704,7 +704,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;
 
-               /* destroy all instances for this pid */
+               /* destroy all instances for this portid */
                spin_lock_bh(&instances_lock);
                for  (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
@@ -713,7 +713,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 
                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((net_eq(n->net, &init_net)) &&
-                                   (n->pid == inst->peer_pid))
+                                   (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
                        }
                }
@@ -775,7 +775,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
        }
 
        inst = instance_lookup_get(group_num);
-       if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
+       if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto out_put;
        }
@@ -789,7 +789,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                        }
 
                        inst = instance_create(group_num,
-                                              NETLINK_CB(skb).pid,
+                                              NETLINK_CB(skb).portid,
                                               sk_user_ns(NETLINK_CB(skb).ssk));
                        if (IS_ERR(inst)) {
                                ret = PTR_ERR(inst);
@@ -948,7 +948,7 @@ static int seq_show(struct seq_file *s, void *v)
 
        return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
                          inst->group_num,
-                         inst->peer_pid, inst->qlen,
+                         inst->peer_portid, inst->qlen,
                          inst->copy_mode, inst->copy_range,
                          inst->flushtimeout, atomic_read(&inst->use));
 }
index c0496a55ad0ceffb5470872cadc83c218a4b70c9..e12d44e75b21f79f266b3dc580fd283aa81b58f9 100644 (file)
@@ -44,7 +44,7 @@ struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;
 
-       int peer_pid;
+       int peer_portid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
@@ -92,7 +92,7 @@ instance_lookup(u_int16_t queue_num)
 }
 
 static struct nfqnl_instance *
-instance_create(u_int16_t queue_num, int pid)
+instance_create(u_int16_t queue_num, int portid)
 {
        struct nfqnl_instance *inst;
        unsigned int h;
@@ -111,7 +111,7 @@ instance_create(u_int16_t queue_num, int pid)
        }
 
        inst->queue_num = queue_num;
-       inst->peer_pid = pid;
+       inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
@@ -225,7 +225,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 {
        sk_buff_data_t old_tail;
        size_t size;
-       size_t data_len = 0;
+       size_t data_len = 0, cap_len = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
@@ -247,7 +247,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 #endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
-               + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
+               + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)
+               + nla_total_size(sizeof(u_int32_t)));   /* cap_len */
 
        outdev = entry->outdev;
 
@@ -266,6 +267,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                        data_len = entskb->len;
 
                size += nla_total_size(data_len);
+               cap_len = entskb->len;
                break;
        }
 
@@ -402,12 +404,14 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;
 
+       if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
+               goto nla_put_failure;
+
        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;
 
 nla_put_failure:
-       if (skb)
-               kfree_skb(skb);
+       kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
 }
@@ -440,7 +444,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        }
        spin_lock_bh(&queue->lock);
 
-       if (!queue->peer_pid) {
+       if (!queue->peer_portid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
@@ -459,7 +463,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        *packet_id_ptr = htonl(entry->id);
 
        /* nfnetlink_unicast will either free the nskb or add it to a socket */
-       err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
+       err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
@@ -527,9 +531,13 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
 
        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
-               /* we're using struct nlattr which has 16bit nla_len */
-               if (range > 0xffff)
-                       queue->copy_range = 0xffff;
+               /* We're using struct nlattr which has 16bit nla_len. Note that
+                * nla_len includes the header length. Thus, the maximum packet
+                * length that we support is 65531 bytes. We send truncated
+                * packets if the specified length is larger than that.
+                */
+               if (range > 0xffff - NLA_HDRLEN)
+                       queue->copy_range = 0xffff - NLA_HDRLEN;
                else
                        queue->copy_range = range;
                break;
@@ -616,7 +624,7 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;
 
-               /* destroy all instances for this pid */
+               /* destroy all instances for this portid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
@@ -625,7 +633,7 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
 
                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
-                                   (n->pid == inst->peer_pid))
+                                   (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
                        }
                }
@@ -650,7 +658,7 @@ static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_MARK]             = { .type = NLA_U32 },
 };
 
-static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
+static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
 {
        struct nfqnl_instance *queue;
 
@@ -658,7 +666,7 @@ static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
        if (!queue)
                return ERR_PTR(-ENODEV);
 
-       if (queue->peer_pid != nlpid)
+       if (queue->peer_portid != nlportid)
                return ERR_PTR(-EPERM);
 
        return queue;
@@ -698,7 +706,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);
 
-       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
+       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);
 
@@ -749,7 +757,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
        queue = instance_lookup(queue_num);
        if (!queue)
 
-       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
+       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);
 
@@ -832,7 +840,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 
        rcu_read_lock();
        queue = instance_lookup(queue_num);
-       if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
+       if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto err_out_unlock;
        }
@@ -844,7 +852,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
-                       queue = instance_create(queue_num, NETLINK_CB(skb).pid);
+                       queue = instance_create(queue_num, NETLINK_CB(skb).portid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
@@ -1016,7 +1024,7 @@ static int seq_show(struct seq_file *s, void *v)
 
        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
-                         inst->peer_pid, inst->queue_total,
+                         inst->peer_portid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
index 116018560c6028789835b92abf481747765b469b..16c712563860bad8b8ba03041b06cf2c386d43ab 100644 (file)
@@ -72,14 +72,44 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
                return 0;
 }
 
+static int
+xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
+                const struct xt_tgchk_param *par)
+{
+       struct nf_conntrack_helper *helper;
+       struct nf_conn_help *help;
+       u8 proto;
+
+       proto = xt_ct_find_proto(par);
+       if (!proto) {
+               pr_info("You must specify a L4 protocol, and not use "
+                       "inversions on it.\n");
+               return -ENOENT;
+       }
+
+       helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
+                                                   proto);
+       if (helper == NULL) {
+               pr_info("No such helper \"%s\"\n", helper_name);
+               return -ENOENT;
+       }
+
+       help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+       if (help == NULL) {
+               module_put(helper->me);
+               return -ENOMEM;
+       }
+
+       help->helper = helper;
+       return 0;
+}
+
 static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
 {
        struct xt_ct_target_info *info = par->targinfo;
        struct nf_conntrack_tuple t;
-       struct nf_conn_help *help;
        struct nf_conn *ct;
-       int ret = 0;
-       u8 proto;
+       int ret;
 
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
@@ -112,31 +142,9 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
                goto err3;
 
        if (info->helper[0]) {
-               struct nf_conntrack_helper *helper;
-
-               ret = -ENOENT;
-               proto = xt_ct_find_proto(par);
-               if (!proto) {
-                       pr_info("You must specify a L4 protocol, "
-                               "and not use inversions on it.\n");
-                       goto err3;
-               }
-
-               ret = -ENOENT;
-               helper = nf_conntrack_helper_try_module_get(info->helper,
-                                                           par->family,
-                                                           proto);
-               if (helper == NULL) {
-                       pr_info("No such helper \"%s\"\n", info->helper);
-                       goto err3;
-               }
-
-               ret = -ENOMEM;
-               help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
-               if (help == NULL)
+               ret = xt_ct_set_helper(ct, info->helper, par);
+               if (ret < 0)
                        goto err3;
-
-               help->helper = helper;
        }
 
        __set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -164,17 +172,77 @@ static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
 }
 #endif
 
+static int
+xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
+                 const char *timeout_name)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+       typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
+       struct ctnl_timeout *timeout;
+       struct nf_conn_timeout *timeout_ext;
+       const struct ipt_entry *e = par->entryinfo;
+       struct nf_conntrack_l4proto *l4proto;
+       int ret = 0;
+
+       rcu_read_lock();
+       timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
+       if (timeout_find_get == NULL) {
+               ret = -ENOENT;
+               pr_info("Timeout policy base is empty\n");
+               goto out;
+       }
+
+       if (e->ip.invflags & IPT_INV_PROTO) {
+               ret = -EINVAL;
+               pr_info("You cannot use inversion on L4 protocol\n");
+               goto out;
+       }
+
+       timeout = timeout_find_get(timeout_name);
+       if (timeout == NULL) {
+               ret = -ENOENT;
+               pr_info("No such timeout policy \"%s\"\n", timeout_name);
+               goto out;
+       }
+
+       if (timeout->l3num != par->family) {
+               ret = -EINVAL;
+               pr_info("Timeout policy `%s' can only be used by L3 protocol "
+                       "number %d\n", timeout_name, timeout->l3num);
+               goto err_put_timeout;
+       }
+       /* Make sure the timeout policy matches any existing protocol tracker,
+        * otherwise default to generic.
+        */
+       l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
+       if (timeout->l4proto->l4proto != l4proto->l4proto) {
+               ret = -EINVAL;
+               pr_info("Timeout policy `%s' can only be used by L4 protocol "
+                       "number %d\n",
+                       timeout_name, timeout->l4proto->l4proto);
+               goto err_put_timeout;
+       }
+       timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
+       if (timeout_ext == NULL)
+               ret = -ENOMEM;
+
+err_put_timeout:
+       __xt_ct_tg_timeout_put(timeout);
+out:
+       rcu_read_unlock();
+       return ret;
+#else
+       return -EOPNOTSUPP;
+#endif
+}
+
 static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 {
        struct xt_ct_target_info_v1 *info = par->targinfo;
        struct nf_conntrack_tuple t;
-       struct nf_conn_help *help;
        struct nf_conn *ct;
-       int ret = 0;
-       u8 proto;
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       struct ctnl_timeout *timeout;
-#endif
+       int ret;
+
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
 
@@ -206,93 +274,16 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
                goto err3;
 
        if (info->helper[0]) {
-               struct nf_conntrack_helper *helper;
-
-               ret = -ENOENT;
-               proto = xt_ct_find_proto(par);
-               if (!proto) {
-                       pr_info("You must specify a L4 protocol, "
-                               "and not use inversions on it.\n");
-                       goto err3;
-               }
-
-               ret = -ENOENT;
-               helper = nf_conntrack_helper_try_module_get(info->helper,
-                                                           par->family,
-                                                           proto);
-               if (helper == NULL) {
-                       pr_info("No such helper \"%s\"\n", info->helper);
-                       goto err3;
-               }
-
-               ret = -ENOMEM;
-               help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
-               if (help == NULL)
+               ret = xt_ct_set_helper(ct, info->helper, par);
+               if (ret < 0)
                        goto err3;
-
-               help->helper = helper;
        }
 
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        if (info->timeout[0]) {
-               typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
-               struct nf_conn_timeout *timeout_ext;
-
-               rcu_read_lock();
-               timeout_find_get =
-                       rcu_dereference(nf_ct_timeout_find_get_hook);
-
-               if (timeout_find_get) {
-                       const struct ipt_entry *e = par->entryinfo;
-                       struct nf_conntrack_l4proto *l4proto;
-
-                       if (e->ip.invflags & IPT_INV_PROTO) {
-                               ret = -EINVAL;
-                               pr_info("You cannot use inversion on "
-                                        "L4 protocol\n");
-                               goto err4;
-                       }
-                       timeout = timeout_find_get(info->timeout);
-                       if (timeout == NULL) {
-                               ret = -ENOENT;
-                               pr_info("No such timeout policy \"%s\"\n",
-                                       info->timeout);
-                               goto err4;
-                       }
-                       if (timeout->l3num != par->family) {
-                               ret = -EINVAL;
-                               pr_info("Timeout policy `%s' can only be "
-                                       "used by L3 protocol number %d\n",
-                                       info->timeout, timeout->l3num);
-                               goto err5;
-                       }
-                       /* Make sure the timeout policy matches any existing
-                        * protocol tracker, otherwise default to generic.
-                        */
-                       l4proto = __nf_ct_l4proto_find(par->family,
-                                                      e->ip.proto);
-                       if (timeout->l4proto->l4proto != l4proto->l4proto) {
-                               ret = -EINVAL;
-                               pr_info("Timeout policy `%s' can only be "
-                                       "used by L4 protocol number %d\n",
-                                       info->timeout,
-                                       timeout->l4proto->l4proto);
-                               goto err5;
-                       }
-                       timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
-                                                           GFP_ATOMIC);
-                       if (timeout_ext == NULL) {
-                               ret = -ENOMEM;
-                               goto err5;
-                       }
-               } else {
-                       ret = -ENOENT;
-                       pr_info("Timeout policy base is empty\n");
-                       goto err4;
-               }
-               rcu_read_unlock();
+               ret = xt_ct_set_timeout(ct, par, info->timeout);
+               if (ret < 0)
+                       goto err3;
        }
-#endif
 
        __set_bit(IPS_TEMPLATE_BIT, &ct->status);
        __set_bit(IPS_CONFIRMED_BIT, &ct->status);
@@ -300,12 +291,6 @@ out:
        info->ct = ct;
        return 0;
 
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-err5:
-       __xt_ct_tg_timeout_put(timeout);
-err4:
-       rcu_read_unlock();
-#endif
 err3:
        nf_conntrack_free(ct);
 err2:
@@ -330,15 +315,30 @@ static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
        nf_ct_put(info->ct);
 }
 
-static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+static void xt_ct_destroy_timeout(struct nf_conn *ct)
 {
-       struct xt_ct_target_info_v1 *info = par->targinfo;
-       struct nf_conn *ct = info->ct;
-       struct nf_conn_help *help;
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        struct nf_conn_timeout *timeout_ext;
        typeof(nf_ct_timeout_put_hook) timeout_put;
+
+       rcu_read_lock();
+       timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+
+       if (timeout_put) {
+               timeout_ext = nf_ct_timeout_find(ct);
+               if (timeout_ext)
+                       timeout_put(timeout_ext->timeout);
+       }
+       rcu_read_unlock();
 #endif
+}
+
+static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+{
+       struct xt_ct_target_info_v1 *info = par->targinfo;
+       struct nf_conn *ct = info->ct;
+       struct nf_conn_help *help;
+
        if (!nf_ct_is_untracked(ct)) {
                help = nfct_help(ct);
                if (help)
@@ -346,17 +346,7 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
 
                nf_ct_l3proto_module_put(par->family);
 
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-               rcu_read_lock();
-               timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
-
-               if (timeout_put) {
-                       timeout_ext = nf_ct_timeout_find(ct);
-                       if (timeout_ext)
-                               timeout_put(timeout_ext->timeout);
-               }
-               rcu_read_unlock();
-#endif
+               xt_ct_destroy_timeout(ct);
        }
        nf_ct_put(info->ct);
 }
diff --git a/net/netfilter/xt_NETMAP.c b/net/netfilter/xt_NETMAP.c
new file mode 100644 (file)
index 0000000..b253e07
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
+
+static unsigned int
+netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       struct nf_nat_range newrange;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       union nf_inet_addr new_addr, netmask;
+       unsigned int i;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
+               netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
+                                  range->max_addr.ip6[i]);
+
+       if (par->hooknum == NF_INET_PRE_ROUTING ||
+           par->hooknum == NF_INET_LOCAL_OUT)
+               new_addr.in6 = ipv6_hdr(skb)->daddr;
+       else
+               new_addr.in6 = ipv6_hdr(skb)->saddr;
+
+       for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
+               new_addr.ip6[i] &= ~netmask.ip6[i];
+               new_addr.ip6[i] |= range->min_addr.ip6[i] &
+                                  netmask.ip6[i];
+       }
+
+       newrange.flags  = range->flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr       = new_addr;
+       newrange.max_addr       = new_addr;
+       newrange.min_proto      = range->min_proto;
+       newrange.max_proto      = range->max_proto;
+
+       return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
+}
+
+static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+
+       if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
+               return -EINVAL;
+       return 0;
+}
+
+static unsigned int
+netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       __be32 new_ip, netmask;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range newrange;
+
+       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
+                    par->hooknum == NF_INET_POST_ROUTING ||
+                    par->hooknum == NF_INET_LOCAL_OUT ||
+                    par->hooknum == NF_INET_LOCAL_IN);
+       ct = nf_ct_get(skb, &ctinfo);
+
+       netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
+
+       if (par->hooknum == NF_INET_PRE_ROUTING ||
+           par->hooknum == NF_INET_LOCAL_OUT)
+               new_ip = ip_hdr(skb)->daddr & ~netmask;
+       else
+               new_ip = ip_hdr(skb)->saddr & ~netmask;
+       new_ip |= mr->range[0].min_ip & netmask;
+
+       memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+       memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+       newrange.flags       = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.ip = new_ip;
+       newrange.max_addr.ip = new_ip;
+       newrange.min_proto   = mr->range[0].min;
+       newrange.max_proto   = mr->range[0].max;
+
+       /* Hand modified range to generic setup. */
+       return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
+}
+
+static int netmap_tg4_check(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+
+       if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
+               pr_debug("bad MAP_IPS.\n");
+               return -EINVAL;
+       }
+       if (mr->rangesize != 1) {
+               pr_debug("bad rangesize %u.\n", mr->rangesize);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct xt_target netmap_tg_reg[] __read_mostly = {
+       {
+               .name       = "NETMAP",
+               .family     = NFPROTO_IPV6,
+               .revision   = 0,
+               .target     = netmap_tg6,
+               .targetsize = sizeof(struct nf_nat_range),
+               .table      = "nat",
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_POST_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT) |
+                             (1 << NF_INET_LOCAL_IN),
+               .checkentry = netmap_tg6_checkentry,
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "NETMAP",
+               .family     = NFPROTO_IPV4,
+               .revision   = 0,
+               .target     = netmap_tg4,
+               .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .table      = "nat",
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_POST_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT) |
+                             (1 << NF_INET_LOCAL_IN),
+               .checkentry = netmap_tg4_check,
+               .me         = THIS_MODULE,
+       },
+};
+
+static int __init netmap_tg_init(void)
+{
+       return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
+}
+
+static void netmap_tg_exit(void)
+{
+       xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
+}
+
+module_init(netmap_tg_init);
+module_exit(netmap_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS("ip6t_NETMAP");
+MODULE_ALIAS("ipt_NETMAP");
index 7babe7d687169d6231fa17149bfd10cb52ee7ebe..817f9e9f2b16c70930df5e51d62f5fd606e76f69 100644 (file)
@@ -43,7 +43,7 @@ static u32 hash_v4(const struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
 
        /* packets in either direction go into same queue */
-       if (iph->saddr < iph->daddr)
+       if ((__force u32)iph->saddr < (__force u32)iph->daddr)
                return jhash_3words((__force u32)iph->saddr,
                        (__force u32)iph->daddr, iph->protocol, jhash_initval);
 
@@ -57,7 +57,8 @@ static u32 hash_v6(const struct sk_buff *skb)
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        u32 a, b, c;
 
-       if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+       if ((__force u32)ip6h->saddr.s6_addr32[3] <
+           (__force u32)ip6h->daddr.s6_addr32[3]) {
                a = (__force u32) ip6h->saddr.s6_addr32[3];
                b = (__force u32) ip6h->daddr.s6_addr32[3];
        } else {
@@ -65,7 +66,8 @@ static u32 hash_v6(const struct sk_buff *skb)
                a = (__force u32) ip6h->daddr.s6_addr32[3];
        }
 
-       if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+       if ((__force u32)ip6h->saddr.s6_addr32[1] <
+           (__force u32)ip6h->daddr.s6_addr32[1])
                c = (__force u32) ip6h->saddr.s6_addr32[1];
        else
                c = (__force u32) ip6h->daddr.s6_addr32[1];
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
deleted file mode 100644 (file)
index 9d78218..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/* This is a module which is used for setting up fake conntracks
- * on packets so that they are not seen by the conntrack/NAT code.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_conntrack.h>
-
-MODULE_DESCRIPTION("Xtables: Disabling connection tracking for packets");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ipt_NOTRACK");
-MODULE_ALIAS("ip6t_NOTRACK");
-
-static unsigned int
-notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       /* Previously seen (loopback)? Ignore. */
-       if (skb->nfct != NULL)
-               return XT_CONTINUE;
-
-       /* Attach fake conntrack entry.
-          If there is a real ct entry correspondig to this packet,
-          it'll hang aroun till timing out. We don't deal with it
-          for performance reasons. JK */
-       skb->nfct = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
-
-       return XT_CONTINUE;
-}
-
-static struct xt_target notrack_tg_reg __read_mostly = {
-       .name     = "NOTRACK",
-       .revision = 0,
-       .family   = NFPROTO_UNSPEC,
-       .target   = notrack_tg,
-       .table    = "raw",
-       .me       = THIS_MODULE,
-};
-
-static int __init notrack_tg_init(void)
-{
-       return xt_register_target(&notrack_tg_reg);
-}
-
-static void __exit notrack_tg_exit(void)
-{
-       xt_unregister_target(&notrack_tg_reg);
-}
-
-module_init(notrack_tg_init);
-module_exit(notrack_tg_exit);
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
new file mode 100644 (file)
index 0000000..22a1030
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/types.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/addrconf.h>
+#include <net/checksum.h>
+#include <net/protocol.h>
+#include <net/netfilter/nf_nat.h>
+
+static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
+static unsigned int
+redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       struct nf_nat_range newrange;
+       struct in6_addr newdst;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (par->hooknum == NF_INET_LOCAL_OUT)
+               newdst = loopback_addr;
+       else {
+               struct inet6_dev *idev;
+               struct inet6_ifaddr *ifa;
+               bool addr = false;
+
+               rcu_read_lock();
+               idev = __in6_dev_get(skb->dev);
+               if (idev != NULL) {
+                       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+                               newdst = ifa->addr;
+                               addr = true;
+                               break;
+                       }
+               }
+               rcu_read_unlock();
+
+               if (!addr)
+                       return NF_DROP;
+       }
+
+       newrange.flags          = range->flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.in6   = newdst;
+       newrange.max_addr.in6   = newdst;
+       newrange.min_proto      = range->min_proto;
+       newrange.max_proto      = range->max_proto;
+
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+
+static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+
+       if (range->flags & NF_NAT_RANGE_MAP_IPS)
+               return -EINVAL;
+       return 0;
+}
+
+/* FIXME: Take multiple ranges --RR */
+static int redirect_tg4_check(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+
+       if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
+               pr_debug("bad MAP_IPS.\n");
+               return -EINVAL;
+       }
+       if (mr->rangesize != 1) {
+               pr_debug("bad rangesize %u.\n", mr->rangesize);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static unsigned int
+redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       __be32 newdst;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range newrange;
+
+       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
+                    par->hooknum == NF_INET_LOCAL_OUT);
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+       /* Local packets: make them go to loopback */
+       if (par->hooknum == NF_INET_LOCAL_OUT)
+               newdst = htonl(0x7F000001);
+       else {
+               struct in_device *indev;
+               struct in_ifaddr *ifa;
+
+               newdst = 0;
+
+               rcu_read_lock();
+               indev = __in_dev_get_rcu(skb->dev);
+               if (indev && (ifa = indev->ifa_list))
+                       newdst = ifa->ifa_local;
+               rcu_read_unlock();
+
+               if (!newdst)
+                       return NF_DROP;
+       }
+
+       /* Transfer from original range. */
+       memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+       memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+       newrange.flags       = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.ip = newdst;
+       newrange.max_addr.ip = newdst;
+       newrange.min_proto   = mr->range[0].min;
+       newrange.max_proto   = mr->range[0].max;
+
+       /* Hand modified range to generic setup. */
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+
+static struct xt_target redirect_tg_reg[] __read_mostly = {
+       {
+               .name       = "REDIRECT",
+               .family     = NFPROTO_IPV6,
+               .revision   = 0,
+               .table      = "nat",
+               .checkentry = redirect_tg6_checkentry,
+               .target     = redirect_tg6,
+               .targetsize = sizeof(struct nf_nat_range),
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "REDIRECT",
+               .family     = NFPROTO_IPV4,
+               .revision   = 0,
+               .table      = "nat",
+               .target     = redirect_tg4,
+               .checkentry = redirect_tg4_check,
+               .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT),
+               .me         = THIS_MODULE,
+       },
+};
+
+static int __init redirect_tg_init(void)
+{
+       return xt_register_targets(redirect_tg_reg,
+                                  ARRAY_SIZE(redirect_tg_reg));
+}
+
+static void __exit redirect_tg_exit(void)
+{
+       xt_unregister_targets(redirect_tg_reg, ARRAY_SIZE(redirect_tg_reg));
+}
+
+module_init(redirect_tg_init);
+module_exit(redirect_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
+MODULE_ALIAS("ip6t_REDIRECT");
+MODULE_ALIAS("ipt_REDIRECT");
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
new file mode 100644 (file)
index 0000000..81aafa8
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat_core.h>
+
+static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+
+       if (mr->rangesize != 1) {
+               pr_info("%s: multiple ranges no longer supported\n",
+                       par->target->name);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void xt_nat_convert_range(struct nf_nat_range *dst,
+                                const struct nf_nat_ipv4_range *src)
+{
+       memset(&dst->min_addr, 0, sizeof(dst->min_addr));
+       memset(&dst->max_addr, 0, sizeof(dst->max_addr));
+
+       dst->flags       = src->flags;
+       dst->min_addr.ip = src->min_ip;
+       dst->max_addr.ip = src->max_ip;
+       dst->min_proto   = src->min;
+       dst->max_proto   = src->max;
+}
+
+static unsigned int
+xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range range;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+                     ctinfo == IP_CT_RELATED_REPLY));
+
+       xt_nat_convert_range(&range, &mr->range[0]);
+       return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+}
+
+static unsigned int
+xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range range;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+       xt_nat_convert_range(&range, &mr->range[0]);
+       return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+
+static unsigned int
+xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+                     ctinfo == IP_CT_RELATED_REPLY));
+
+       return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
+}
+
+static unsigned int
+xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+       return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST);
+}
+
+static struct xt_target xt_nat_target_reg[] __read_mostly = {
+       {
+               .name           = "SNAT",
+               .revision       = 0,
+               .checkentry     = xt_nat_checkentry_v0,
+               .target         = xt_snat_target_v0,
+               .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .family         = NFPROTO_IPV4,
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_POST_ROUTING) |
+                                 (1 << NF_INET_LOCAL_OUT),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "DNAT",
+               .revision       = 0,
+               .checkentry     = xt_nat_checkentry_v0,
+               .target         = xt_dnat_target_v0,
+               .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .family         = NFPROTO_IPV4,
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_PRE_ROUTING) |
+                                 (1 << NF_INET_LOCAL_IN),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "SNAT",
+               .revision       = 1,
+               .target         = xt_snat_target_v1,
+               .targetsize     = sizeof(struct nf_nat_range),
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_POST_ROUTING) |
+                                 (1 << NF_INET_LOCAL_OUT),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "DNAT",
+               .revision       = 1,
+               .target         = xt_dnat_target_v1,
+               .targetsize     = sizeof(struct nf_nat_range),
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_PRE_ROUTING) |
+                                 (1 << NF_INET_LOCAL_IN),
+               .me             = THIS_MODULE,
+       },
+};
+
+static int __init xt_nat_init(void)
+{
+       return xt_register_targets(xt_nat_target_reg,
+                                  ARRAY_SIZE(xt_nat_target_reg));
+}
+
+static void __exit xt_nat_exit(void)
+{
+       xt_unregister_targets(xt_nat_target_reg, ARRAY_SIZE(xt_nat_target_reg));
+}
+
+module_init(xt_nat_init);
+module_exit(xt_nat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS("ipt_SNAT");
+MODULE_ALIAS("ipt_DNAT");
+MODULE_ALIAS("ip6t_SNAT");
+MODULE_ALIAS("ip6t_DNAT");
index 846f895cb656ddf48989d28900c55b2bae9f475e..a5e673d32bdaec2dec9fb7fce26905fff1459b77 100644 (file)
@@ -269,7 +269,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
                                                mss <<= 8;
                                                mss |= optp[2];
 
-                                               mss = ntohs(mss);
+                                               mss = ntohs((__force __be16)mss);
                                                break;
                                        case OSFOPT_TS:
                                                loop_cont = 1;
index c6f7db720d84f4650e975a952054c04d15868fc3..865a9e54f3ad85477e8b3d5dc481f1ea444fe978 100644 (file)
@@ -356,6 +356,27 @@ static struct xt_match set_matches[] __read_mostly = {
                .destroy        = set_match_v1_destroy,
                .me             = THIS_MODULE
        },
+       /* --return-nomatch flag support */
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV4,
+               .revision       = 2,
+               .match          = set_match_v1,
+               .matchsize      = sizeof(struct xt_set_info_match_v1),
+               .checkentry     = set_match_v1_checkentry,
+               .destroy        = set_match_v1_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV6,
+               .revision       = 2,
+               .match          = set_match_v1,
+               .matchsize      = sizeof(struct xt_set_info_match_v1),
+               .checkentry     = set_match_v1_checkentry,
+               .destroy        = set_match_v1_destroy,
+               .me             = THIS_MODULE
+       },
 };
 
 static struct xt_target set_targets[] __read_mostly = {
@@ -389,6 +410,7 @@ static struct xt_target set_targets[] __read_mostly = {
                .destroy        = set_target_v1_destroy,
                .me             = THIS_MODULE
        },
+       /* --timeout and --exist flags support */
        {
                .name           = "SET",
                .revision       = 2,
index 9ea482d08cf7e53d8e8b398fd449ca75f05f8538..63b2bdb59e955fd012de3787f9e52279169fe4bc 100644 (file)
@@ -108,9 +108,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
        struct sock *sk;
-       __be32 daddr, saddr;
-       __be16 dport, sport;
-       u8 protocol;
+       __be32 uninitialized_var(daddr), uninitialized_var(saddr);
+       __be16 uninitialized_var(dport), uninitialized_var(sport);
+       u8 uninitialized_var(protocol);
 #ifdef XT_SOCKET_HAVE_CONNTRACK
        struct nf_conn const *ct;
        enum ip_conntrack_info ctinfo;
@@ -261,9 +261,9 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
        struct sock *sk;
-       struct in6_addr *daddr, *saddr;
-       __be16 dport, sport;
-       int thoff = 0, tproto;
+       struct in6_addr *daddr = NULL, *saddr = NULL;
+       __be16 uninitialized_var(dport), uninitialized_var(sport);
+       int thoff = 0, uninitialized_var(tproto);
        const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
        tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
index c48975ff8ea27c4d1e7d99ae60f67f1da94d8003..0ae55a36f492902ef19753c336160b453ef0cd34 100644 (file)
@@ -42,6 +42,7 @@ static const u_int16_t days_since_leapyear[] = {
  */
 enum {
        DSE_FIRST = 2039,
+       SECONDS_PER_DAY = 86400,
 };
 static const u_int16_t days_since_epoch[] = {
        /* 2039 - 2030 */
@@ -78,7 +79,7 @@ static inline unsigned int localtime_1(struct xtm *r, time_t time)
        unsigned int v, w;
 
        /* Each day has 86400s, so finding the hour/minute is actually easy. */
-       v         = time % 86400;
+       v         = time % SECONDS_PER_DAY;
        r->second = v % 60;
        w         = v / 60;
        r->minute = w % 60;
@@ -199,6 +200,18 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
                if (packet_time < info->daytime_start &&
                    packet_time > info->daytime_stop)
                        return false;
+
+               /* If user asked to ignore 'next day', then e.g.
+                *  '1 PM Wed, August 1st' should be treated
+                *  like 'Tue 1 PM July 31st'.
+                *
+                * This also causes
+                * 'Monday, 23:00 to 01:00' to match for 2 hours, starting
+                * Monday 23:00 to Tuesday 01:00.
+                */
+               if ((info->flags & XT_TIME_CONTIGUOUS) &&
+                    packet_time <= info->daytime_stop)
+                       stamp -= SECONDS_PER_DAY;
        }
 
        localtime_2(&current_time, stamp);
@@ -227,6 +240,15 @@ static int time_mt_check(const struct xt_mtchk_param *par)
                return -EDOM;
        }
 
+       if (info->flags & ~XT_TIME_ALL_FLAGS) {
+               pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS);
+               return -EINVAL;
+       }
+
+       if ((info->flags & XT_TIME_CONTIGUOUS) &&
+            info->daytime_start < info->daytime_stop)
+               return -EINVAL;
+
        return 0;
 }
 
index 6bf878335d9436d40b1619c37c8918f3bd12beb0..c15042f987bd8f5697338b88cc77ed6553a0d609 100644 (file)
@@ -627,7 +627,7 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg)
        struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg;
        void *data;
 
-       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
                           cb_arg->seq, &netlbl_cipsov4_gnl_family,
                           NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL);
        if (data == NULL)
index 4809e2e48b02542931d436188680f9663a6dd699..c5384ffc61469a422f1e5f4a9519131d95b56236 100644 (file)
@@ -448,7 +448,7 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg)
        struct netlbl_domhsh_walk_arg *cb_arg = arg;
        void *data;
 
-       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
                           cb_arg->seq, &netlbl_mgmt_gnl_family,
                           NLM_F_MULTI, NLBL_MGMT_C_LISTALL);
        if (data == NULL)
@@ -613,7 +613,7 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb,
        int ret_val = -ENOMEM;
        void *data;
 
-       data = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       data = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                           &netlbl_mgmt_gnl_family, NLM_F_MULTI,
                           NLBL_MGMT_C_PROTOCOLS);
        if (data == NULL)
index 729a345c75a4347babec4c0c94aa64c900372abc..847d495cd4de0bee797f994fcaf491ed6efb30ce 100644 (file)
@@ -1096,7 +1096,7 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
        char *secctx;
        u32 secctx_len;
 
-       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
                           cb_arg->seq, &netlbl_unlabel_gnl_family,
                           NLM_F_MULTI, cmd);
        if (data == NULL)
index 3821199171660f3a16a94cab75453aee52d9c89c..0f2e3ad69c473afb5f7fcdbc1cbb05b5c0be46f7 100644 (file)
@@ -67,8 +67,8 @@
 struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock             sk;
-       u32                     pid;
-       u32                     dst_pid;
+       u32                     portid;
+       u32                     dst_portid;
        u32                     dst_group;
        u32                     flags;
        u32                     subscriptions;
@@ -104,7 +104,7 @@ static inline int netlink_is_kernel(struct sock *sk)
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
 }
 
-struct nl_pid_hash {
+struct nl_portid_hash {
        struct hlist_head       *table;
        unsigned long           rehash_time;
 
@@ -118,10 +118,10 @@ struct nl_pid_hash {
 };
 
 struct netlink_table {
-       struct nl_pid_hash      hash;
+       struct nl_portid_hash   hash;
        struct hlist_head       mc_list;
        struct listeners __rcu  *listeners;
-       unsigned int            nl_nonroot;
+       unsigned int            flags;
        unsigned int            groups;
        struct mutex            *cb_mutex;
        struct module           *module;
@@ -145,9 +145,9 @@ static inline u32 netlink_group_mask(u32 group)
        return group ? 1 << (group - 1) : 0;
 }
 
-static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
 {
-       return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
+       return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 }
 
 static void netlink_destroy_callback(struct netlink_callback *cb)
@@ -239,17 +239,17 @@ netlink_unlock_table(void)
                wake_up(&nl_table_wait);
 }
 
-static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
-       struct nl_pid_hash *hash = &nl_table[protocol].hash;
+       struct nl_portid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;
 
        read_lock(&nl_table_lock);
-       head = nl_pid_hashfn(hash, pid);
+       head = nl_portid_hashfn(hash, portid);
        sk_for_each(sk, node, head) {
-               if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
+               if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
                        sock_hold(sk);
                        goto found;
                }
@@ -260,7 +260,7 @@ found:
        return sk;
 }
 
-static struct hlist_head *nl_pid_hash_zalloc(size_t size)
+static struct hlist_head *nl_portid_hash_zalloc(size_t size)
 {
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_ATOMIC);
@@ -270,7 +270,7 @@ static struct hlist_head *nl_pid_hash_zalloc(size_t size)
                                         get_order(size));
 }
 
-static void nl_pid_hash_free(struct hlist_head *table, size_t size)
+static void nl_portid_hash_free(struct hlist_head *table, size_t size)
 {
        if (size <= PAGE_SIZE)
                kfree(table);
@@ -278,7 +278,7 @@ static void nl_pid_hash_free(struct hlist_head *table, size_t size)
                free_pages((unsigned long)table, get_order(size));
 }
 
-static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
+static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
 {
        unsigned int omask, mask, shift;
        size_t osize, size;
@@ -296,7 +296,7 @@ static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
                size *= 2;
        }
 
-       table = nl_pid_hash_zalloc(size);
+       table = nl_portid_hash_zalloc(size);
        if (!table)
                return 0;
 
@@ -311,23 +311,23 @@ static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
                struct hlist_node *node, *tmp;
 
                sk_for_each_safe(sk, node, tmp, &otable[i])
-                       __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
+                       __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
        }
 
-       nl_pid_hash_free(otable, osize);
+       nl_portid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
 }
 
-static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
+static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
 {
        int avg = hash->entries >> hash->shift;
 
-       if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
+       if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
                return 1;
 
        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
-               nl_pid_hash_rehash(hash, 0);
+               nl_portid_hash_rehash(hash, 0);
                return 1;
        }
 
@@ -356,9 +356,9 @@ netlink_update_listeners(struct sock *sk)
         * makes sure updates are visible before bind or setsockopt return. */
 }
 
-static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
+static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 {
-       struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
+       struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
@@ -366,10 +366,10 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
        int len;
 
        netlink_table_grab();
-       head = nl_pid_hashfn(hash, pid);
+       head = nl_portid_hashfn(hash, portid);
        len = 0;
        sk_for_each(osk, node, head) {
-               if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
+               if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
                        break;
                len++;
        }
@@ -377,17 +377,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
                goto err;
 
        err = -EBUSY;
-       if (nlk_sk(sk)->pid)
+       if (nlk_sk(sk)->portid)
                goto err;
 
        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;
 
-       if (len && nl_pid_hash_dilute(hash, len))
-               head = nl_pid_hashfn(hash, pid);
+       if (len && nl_portid_hash_dilute(hash, len))
+               head = nl_portid_hashfn(hash, portid);
        hash->entries++;
-       nlk_sk(sk)->pid = pid;
+       nlk_sk(sk)->portid = portid;
        sk_add_node(sk, head);
        err = 0;
 
@@ -518,11 +518,11 @@ static int netlink_release(struct socket *sock)
 
        skb_queue_purge(&sk->sk_write_queue);
 
-       if (nlk->pid) {
+       if (nlk->portid) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
-                                               .pid = nlk->pid,
+                                               .portid = nlk->portid,
                                          };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
@@ -536,6 +536,8 @@ static int netlink_release(struct socket *sock)
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        kfree(nl_table[sk->sk_protocol].listeners);
                        nl_table[sk->sk_protocol].module = NULL;
+                       nl_table[sk->sk_protocol].bind = NULL;
+                       nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
        } else if (nlk->subscriptions) {
@@ -557,24 +559,24 @@ static int netlink_autobind(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
-       struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
+       struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
-       s32 pid = task_tgid_vnr(current);
+       s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;
 
 retry:
        cond_resched();
        netlink_table_grab();
-       head = nl_pid_hashfn(hash, pid);
+       head = nl_portid_hashfn(hash, portid);
        sk_for_each(osk, node, head) {
                if (!net_eq(sock_net(osk), net))
                        continue;
-               if (nlk_sk(osk)->pid == pid) {
-                       /* Bind collision, search negative pid values. */
-                       pid = rover--;
+               if (nlk_sk(osk)->portid == portid) {
+                       /* Bind collision, search negative portid values. */
+                       portid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
@@ -583,7 +585,7 @@ retry:
        }
        netlink_table_ungrab();
 
-       err = netlink_insert(sk, net, pid);
+       err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
                goto retry;
 
@@ -596,7 +598,7 @@ retry:
 
 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
 {
-       return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
+       return (nl_table[sock->sk->sk_protocol].flags & flag) ||
               capable(CAP_NET_ADMIN);
 }
 
@@ -659,15 +661,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        /* Only superuser is allowed to listen multicasts */
        if (nladdr->nl_groups) {
-               if (!netlink_capable(sock, NL_NONROOT_RECV))
+               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
        }
 
-       if (nlk->pid) {
-               if (nladdr->nl_pid != nlk->pid)
+       if (nlk->portid) {
+               if (nladdr->nl_pid != nlk->portid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
@@ -713,7 +715,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 
        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state    = NETLINK_UNCONNECTED;
-               nlk->dst_pid    = 0;
+               nlk->dst_portid = 0;
                nlk->dst_group  = 0;
                return 0;
        }
@@ -721,15 +723,15 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        /* Only superuser is allowed to send multicasts */
-       if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
+       if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;
 
-       if (!nlk->pid)
+       if (!nlk->portid)
                err = netlink_autobind(sock);
 
        if (err == 0) {
                sk->sk_state    = NETLINK_CONNECTED;
-               nlk->dst_pid    = nladdr->nl_pid;
+               nlk->dst_portid = nladdr->nl_pid;
                nlk->dst_group  = ffs(nladdr->nl_groups);
        }
 
@@ -748,10 +750,10 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
        *addr_len = sizeof(*nladdr);
 
        if (peer) {
-               nladdr->nl_pid = nlk->dst_pid;
+               nladdr->nl_pid = nlk->dst_portid;
                nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
        } else {
-               nladdr->nl_pid = nlk->pid;
+               nladdr->nl_pid = nlk->portid;
                nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
        }
        return 0;
@@ -770,19 +772,19 @@ static void netlink_overrun(struct sock *sk)
        atomic_inc(&sk->sk_drops);
 }
 
-static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
+static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 {
        struct sock *sock;
        struct netlink_sock *nlk;
 
-       sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
+       sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);
 
        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if (sock->sk_state == NETLINK_CONNECTED &&
-           nlk->dst_pid != nlk_sk(ssk)->pid) {
+           nlk->dst_portid != nlk_sk(ssk)->portid) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
@@ -933,7 +935,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 }
 
 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
-                   u32 pid, int nonblock)
+                   u32 portid, int nonblock)
 {
        struct sock *sk;
        int err;
@@ -943,7 +945,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
 
        timeo = sock_sndtimeo(ssk, nonblock);
 retry:
-       sk = netlink_getsockbypid(ssk, pid);
+       sk = netlink_getsockbyportid(ssk, portid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
@@ -1003,7 +1005,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 struct netlink_broadcast_data {
        struct sock *exclude_sk;
        struct net *net;
-       u32 pid;
+       u32 portid;
        u32 group;
        int failure;
        int delivery_failure;
@@ -1024,7 +1026,7 @@ static int do_one_broadcast(struct sock *sk,
        if (p->exclude_sk == sk)
                goto out;
 
-       if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
+       if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
                goto out;
 
@@ -1076,7 +1078,7 @@ out:
        return 0;
 }
 
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
        u32 group, gfp_t allocation,
        int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
        void *filter_data)
@@ -1090,7 +1092,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
 
        info.exclude_sk = ssk;
        info.net = net;
-       info.pid = pid;
+       info.portid = portid;
        info.group = group;
        info.failure = 0;
        info.delivery_failure = 0;
@@ -1128,17 +1130,17 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
 }
 EXPORT_SYMBOL(netlink_broadcast_filtered);
 
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
                      u32 group, gfp_t allocation)
 {
-       return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
+       return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
                NULL, NULL);
 }
 EXPORT_SYMBOL(netlink_broadcast);
 
 struct netlink_set_err_data {
        struct sock *exclude_sk;
-       u32 pid;
+       u32 portid;
        u32 group;
        int code;
 };
@@ -1154,7 +1156,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
        if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
                goto out;
 
-       if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
+       if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
                goto out;
 
@@ -1172,14 +1174,14 @@ out:
 /**
  * netlink_set_err - report error to broadcast listeners
  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
- * @pid: the PID of a process that we want to skip (if any)
+ * @portid: the PORTID of a process that we want to skip (if any)
  * @groups: the broadcast group that will notice the error
  * @code: error code, must be negative (as usual in kernelspace)
  *
  * This function returns the number of broadcast listeners that have set the
  * NETLINK_RECV_NO_ENOBUFS socket option.
  */
-int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
+int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 {
        struct netlink_set_err_data info;
        struct hlist_node *node;
@@ -1187,7 +1189,7 @@ int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
        int ret = 0;
 
        info.exclude_sk = ssk;
-       info.pid = pid;
+       info.portid = portid;
        info.group = group;
        /* sk->sk_err wants a positive error value */
        info.code = -code;
@@ -1244,7 +1246,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                break;
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP: {
-               if (!netlink_capable(sock, NL_NONROOT_RECV))
+               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
@@ -1352,7 +1354,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
-       u32 dst_pid;
+       u32 dst_portid;
        u32 dst_group;
        struct sk_buff *skb;
        int err;
@@ -1372,18 +1374,18 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                err = -EINVAL;
                if (addr->nl_family != AF_NETLINK)
                        goto out;
-               dst_pid = addr->nl_pid;
+               dst_portid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
-               if ((dst_group || dst_pid) &&
-                   !netlink_capable(sock, NL_NONROOT_SEND))
+               if ((dst_group || dst_portid) &&
+                   !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
                        goto out;
        } else {
-               dst_pid = nlk->dst_pid;
+               dst_portid = nlk->dst_portid;
                dst_group = nlk->dst_group;
        }
 
-       if (!nlk->pid) {
+       if (!nlk->portid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
@@ -1397,9 +1399,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
-       NETLINK_CB(skb).pid     = nlk->pid;
+       NETLINK_CB(skb).portid  = nlk->portid;
        NETLINK_CB(skb).dst_group = dst_group;
-       memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+       NETLINK_CB(skb).creds   = siocb->scm->creds;
 
        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1415,9 +1417,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        if (dst_group) {
                atomic_inc(&skb->users);
-               netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
+               netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
        }
-       err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
+       err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
 
 out:
        scm_destroy(siocb->scm);
@@ -1480,7 +1482,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad    = 0;
-               addr->nl_pid    = NETLINK_CB(skb).pid;
+               addr->nl_pid    = NETLINK_CB(skb).portid;
                addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
                msg->msg_namelen = sizeof(*addr);
        }
@@ -1524,9 +1526,8 @@ static void netlink_data_ready(struct sock *sk, int len)
  */
 
 struct sock *
-netlink_kernel_create(struct net *net, int unit,
-                     struct module *module,
-                     struct netlink_kernel_cfg *cfg)
+__netlink_kernel_create(struct net *net, int unit, struct module *module,
+                       struct netlink_kernel_cfg *cfg)
 {
        struct socket *sock;
        struct sock *sk;
@@ -1580,7 +1581,10 @@ netlink_kernel_create(struct net *net, int unit,
                rcu_assign_pointer(nl_table[unit].listeners, listeners);
                nl_table[unit].cb_mutex = cb_mutex;
                nl_table[unit].module = module;
-               nl_table[unit].bind = cfg ? cfg->bind : NULL;
+               if (cfg) {
+                       nl_table[unit].bind = cfg->bind;
+                       nl_table[unit].flags = cfg->flags;
+               }
                nl_table[unit].registered = 1;
        } else {
                kfree(listeners);
@@ -1598,8 +1602,7 @@ out_sock_release_nosk:
        sock_release(sock);
        return NULL;
 }
-EXPORT_SYMBOL(netlink_kernel_create);
-
+EXPORT_SYMBOL(__netlink_kernel_create);
 
 void
 netlink_kernel_release(struct sock *sk)
@@ -1679,15 +1682,8 @@ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
        netlink_table_ungrab();
 }
 
-void netlink_set_nonroot(int protocol, unsigned int flags)
-{
-       if ((unsigned int)protocol < MAX_LINKS)
-               nl_table[protocol].nl_nonroot = flags;
-}
-EXPORT_SYMBOL(netlink_set_nonroot);
-
 struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
+__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
 {
        struct nlmsghdr *nlh;
        int size = NLMSG_LENGTH(len);
@@ -1696,7 +1692,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
        nlh->nlmsg_type = type;
        nlh->nlmsg_len = size;
        nlh->nlmsg_flags = flags;
-       nlh->nlmsg_pid = pid;
+       nlh->nlmsg_pid = portid;
        nlh->nlmsg_seq = seq;
        if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
                memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
@@ -1792,7 +1788,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        atomic_inc(&skb->users);
        cb->skb = skb;
 
-       sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
+       sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
@@ -1840,7 +1836,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 
                sk = netlink_lookup(sock_net(in_skb->sk),
                                    in_skb->sk->sk_protocol,
-                                   NETLINK_CB(in_skb).pid);
+                                   NETLINK_CB(in_skb).portid);
                if (sk) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
@@ -1849,12 +1845,12 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
                return;
        }
 
-       rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+       rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                          NLMSG_ERROR, payload, 0);
        errmsg = nlmsg_data(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
-       netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
+       netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
 }
 EXPORT_SYMBOL(netlink_ack);
 
@@ -1904,33 +1900,33 @@ EXPORT_SYMBOL(netlink_rcv_skb);
  * nlmsg_notify - send a notification netlink message
  * @sk: netlink socket to use
  * @skb: notification message
- * @pid: destination netlink pid for reports or 0
+ * @portid: destination netlink portid for reports or 0
  * @group: destination multicast group or 0
  * @report: 1 to report back, 0 to disable
  * @flags: allocation flags
  */
-int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
                 unsigned int group, int report, gfp_t flags)
 {
        int err = 0;
 
        if (group) {
-               int exclude_pid = 0;
+               int exclude_portid = 0;
 
                if (report) {
                        atomic_inc(&skb->users);
-                       exclude_pid = pid;
+                       exclude_portid = portid;
                }
 
                /* errors reported via destination sk->sk_err, but propagate
                 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
-               err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
+               err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
        }
 
        if (report) {
                int err2;
 
-               err2 = nlmsg_unicast(sk, skb, pid);
+               err2 = nlmsg_unicast(sk, skb, portid);
                if (!err || err == -ESRCH)
                        err = err2;
        }
@@ -1955,7 +1951,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
        loff_t off = 0;
 
        for (i = 0; i < MAX_LINKS; i++) {
-               struct nl_pid_hash *hash = &nl_table[i].hash;
+               struct nl_portid_hash *hash = &nl_table[i].hash;
 
                for (j = 0; j <= hash->mask; j++) {
                        sk_for_each(s, node, &hash->table[j]) {
@@ -2003,7 +1999,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        j = iter->hash_idx + 1;
 
        do {
-               struct nl_pid_hash *hash = &nl_table[i].hash;
+               struct nl_portid_hash *hash = &nl_table[i].hash;
 
                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
@@ -2042,7 +2038,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
                           s,
                           s->sk_protocol,
-                          nlk->pid,
+                          nlk->portid,
                           nlk->groups ? (u32)nlk->groups[0] : 0,
                           sk_rmem_alloc_get(s),
                           sk_wmem_alloc_get(s),
@@ -2150,7 +2146,7 @@ static void __init netlink_add_usersock_entry(void)
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
-       nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
+       nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
 
        netlink_table_ungrab();
 }
@@ -2187,12 +2183,12 @@ static int __init netlink_proto_init(void)
        order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
 
        for (i = 0; i < MAX_LINKS; i++) {
-               struct nl_pid_hash *hash = &nl_table[i].hash;
+               struct nl_portid_hash *hash = &nl_table[i].hash;
 
-               hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
+               hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
-                               nl_pid_hash_free(nl_table[i].hash.table,
+                               nl_portid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto panic;
index fda497412fc34a5b10aa25a1c7599118d5e832a8..f2aabb6f410582439604d7a3c0f379a5cd798621 100644 (file)
@@ -501,7 +501,7 @@ EXPORT_SYMBOL(genl_unregister_family);
 /**
  * genlmsg_put - Add generic netlink header to netlink message
  * @skb: socket buffer holding the message
- * @pid: netlink pid the message is addressed to
+ * @portid: netlink portid the message is addressed to
  * @seq: sequence number (usually the one of the sender)
  * @family: generic netlink family
  * @flags: netlink message flags
@@ -509,13 +509,13 @@ EXPORT_SYMBOL(genl_unregister_family);
  *
  * Returns pointer to user specific header
  */
-void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                                struct genl_family *family, int flags, u8 cmd)
 {
        struct nlmsghdr *nlh;
        struct genlmsghdr *hdr;
 
-       nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
+       nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
                        family->hdrsize, flags);
        if (nlh == NULL)
                return NULL;
@@ -585,7 +585,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        }
 
        info.snd_seq = nlh->nlmsg_seq;
-       info.snd_pid = NETLINK_CB(skb).pid;
+       info.snd_portid = NETLINK_CB(skb).portid;
        info.nlhdr = nlh;
        info.genlhdr = nlmsg_data(nlh);
        info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
@@ -626,12 +626,12 @@ static struct genl_family genl_ctrl = {
        .netnsok = true,
 };
 
-static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
+static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
                          u32 flags, struct sk_buff *skb, u8 cmd)
 {
        void *hdr;
 
-       hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
+       hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;
 
@@ -701,7 +701,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
+static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
                                u32 seq, u32 flags, struct sk_buff *skb,
                                u8 cmd)
 {
@@ -709,7 +709,7 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
        struct nlattr *nla_grps;
        struct nlattr *nest;
 
-       hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
+       hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;
 
@@ -756,7 +756,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
                        if (++n < fams_to_skip)
                                continue;
-                       if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
+                       if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           skb, CTRL_CMD_NEWFAMILY) < 0)
                                goto errout;
@@ -773,7 +773,7 @@ errout:
 }
 
 static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
-                                            u32 pid, int seq, u8 cmd)
+                                            u32 portid, int seq, u8 cmd)
 {
        struct sk_buff *skb;
        int err;
@@ -782,7 +782,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);
 
-       err = ctrl_fill_info(family, pid, seq, 0, skb, cmd);
+       err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
@@ -792,7 +792,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
 }
 
 static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
-                                           u32 pid, int seq, u8 cmd)
+                                           u32 portid, int seq, u8 cmd)
 {
        struct sk_buff *skb;
        int err;
@@ -801,7 +801,7 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);
 
-       err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd);
+       err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
@@ -853,7 +853,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
                return -ENOENT;
        }
 
-       msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
+       msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
                                    CTRL_CMD_NEWFAMILY);
        if (IS_ERR(msg))
                return PTR_ERR(msg);
@@ -918,11 +918,11 @@ static int __net_init genl_pernet_init(struct net *net)
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .cb_mutex       = &genl_mutex,
+               .flags          = NL_CFG_F_NONROOT_RECV,
        };
 
        /* we'll bump the group number right afterwards */
-       net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC,
-                                              THIS_MODULE, &cfg);
+       net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
 
        if (!net->genl_sock && net_eq(net, &init_net))
                panic("GENL: Cannot initialize generic netlink\n");
@@ -955,8 +955,6 @@ static int __init genl_init(void)
        if (err < 0)
                goto problem;
 
-       netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
-
        err = register_pernet_subsys(&genl_pernet_ops);
        if (err)
                goto problem;
@@ -973,7 +971,7 @@ problem:
 
 subsys_initcall(genl_init);
 
-static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
+static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
                         gfp_t flags)
 {
        struct sk_buff *tmp;
@@ -988,7 +986,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
                                goto error;
                        }
                        err = nlmsg_multicast(prev->genl_sock, tmp,
-                                             pid, group, flags);
+                                             portid, group, flags);
                        if (err)
                                goto error;
                }
@@ -996,20 +994,20 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
                prev = net;
        }
 
-       return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags);
+       return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
  error:
        kfree_skb(skb);
        return err;
 }
 
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group,
                            gfp_t flags)
 {
-       return genlmsg_mcast(skb, pid, group, flags);
+       return genlmsg_mcast(skb, portid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
 
-void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
                 struct nlmsghdr *nlh, gfp_t flags)
 {
        struct sock *sk = net->genl_sock;
@@ -1018,6 +1016,6 @@ void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
        if (nlh)
                report = nlmsg_report(nlh);
 
-       nlmsg_notify(sk, skb, pid, group, report, flags);
+       nlmsg_notify(sk, skb, portid, group, report, flags);
 }
 EXPORT_SYMBOL(genl_notify);
index ff749794bc5b87d0b72e36e48cf92caa9487d130..c9eacc1f145f8e6da7990c56822b44dfedc80fe2 100644 (file)
@@ -679,7 +679,7 @@ static void nfc_release(struct device *d)
 
        if (dev->ops->check_presence) {
                del_timer_sync(&dev->check_pres_timer);
-               destroy_workqueue(dev->check_pres_wq);
+               cancel_work_sync(&dev->check_pres_work);
        }
 
        nfc_genl_data_exit(&dev->genl_data);
@@ -715,7 +715,7 @@ static void nfc_check_pres_timeout(unsigned long data)
 {
        struct nfc_dev *dev = (struct nfc_dev *)data;
 
-       queue_work(dev->check_pres_wq, &dev->check_pres_work);
+       queue_work(system_nrt_wq, &dev->check_pres_work);
 }
 
 struct class nfc_class = {
@@ -784,20 +784,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
        dev->targets_generation = 1;
 
        if (ops->check_presence) {
-               char name[32];
                init_timer(&dev->check_pres_timer);
                dev->check_pres_timer.data = (unsigned long)dev;
                dev->check_pres_timer.function = nfc_check_pres_timeout;
 
                INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
-               snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
-               dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
-                                                    WQ_UNBOUND |
-                                                    WQ_MEM_RECLAIM, 1);
-               if (dev->check_pres_wq == NULL) {
-                       kfree(dev);
-                       return NULL;
-               }
        }
 
        return dev;
index f9c44b2fb065de5b7f753390caeb4b3ab5466b68..c5dbb6891b24bcce74821a648b04345d02dfcc39 100644 (file)
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_NFC_HCI) += hci.o
 
-hci-y                  := core.o hcp.o command.o
-hci-$(CONFIG_NFC_SHDLC)        += shdlc.o
+hci-y                  := core.o hcp.o command.o llc.o llc_nop.o
+hci-$(CONFIG_NFC_SHDLC) += llc_shdlc.o
index 46362ef979db1ca88b4ad69d4b6adb7bd440f844..71c6a7086b8f04b4e6264969d21f1ba5a362fcb8 100644 (file)
 
 #include "hci.h"
 
-static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, int err,
-                              struct sk_buff *skb, void *cb_data)
+static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+                              const u8 *param, size_t param_len,
+                              data_exchange_cb_t cb, void *cb_context)
 {
-       struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
+       pr_debug("exec cmd async through pipe=%d, cmd=%d, plen=%zd\n", pipe,
+                cmd, param_len);
+
+       /* TODO: Define hci cmd execution delay. Should it be the same
+        * for all commands?
+        */
+       return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
+                                     param, param_len, cb, cb_context, 3000);
+}
+
+/*
+ * HCI command execution completion callback.
+ * err will be a standard linux error (may be converted from HCI response)
+ * skb contains the response data and must be disposed, or may be NULL if
+ * an error occurred
+ */
+static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err)
+{
+       struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)context;
 
        pr_debug("HCI Cmd completed with result=%d\n", err);
 
@@ -55,7 +74,8 @@ static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
        hcp_ew.exec_complete = false;
        hcp_ew.result_skb = NULL;
 
-       pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);
+       pr_debug("exec cmd sync through pipe=%d, cmd=%d, plen=%zd\n", pipe,
+                cmd, param_len);
 
        /* TODO: Define hci cmd execution delay. Should it be the same
         * for all commands?
@@ -133,6 +153,23 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
 }
 EXPORT_SYMBOL(nfc_hci_send_cmd);
 
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+                          const u8 *param, size_t param_len,
+                          data_exchange_cb_t cb, void *cb_context)
+{
+       u8 pipe;
+
+       pr_debug("\n");
+
+       pipe = hdev->gate2pipe[gate];
+       if (pipe == NFC_HCI_INVALID_PIPE)
+               return -EADDRNOTAVAIL;
+
+       return nfc_hci_execute_cmd_async(hdev, pipe, cmd, param, param_len,
+                                        cb, cb_context);
+}
+EXPORT_SYMBOL(nfc_hci_send_cmd_async);
+
 int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
                      const u8 *param, size_t param_len)
 {
index 1ac7b3fac6c9bb9e12030efb67d540821f690988..d378d93de62e8825d31166c567ececbfd1564153 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <net/nfc/nfc.h>
 #include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
 
 #include "hci.h"
 
@@ -57,12 +58,11 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
        if (hdev->cmd_pending_msg) {
                if (timer_pending(&hdev->cmd_timer) == 0) {
                        if (hdev->cmd_pending_msg->cb)
-                               hdev->cmd_pending_msg->cb(hdev,
-                                                         -ETIME,
-                                                         NULL,
-                                                         hdev->
+                               hdev->cmd_pending_msg->cb(hdev->
                                                          cmd_pending_msg->
-                                                         cb_context);
+                                                         cb_context,
+                                                         NULL,
+                                                         -ETIME);
                        kfree(hdev->cmd_pending_msg);
                        hdev->cmd_pending_msg = NULL;
                } else
@@ -78,12 +78,12 @@ next_msg:
 
        pr_debug("msg_tx_queue has a cmd to send\n");
        while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
-               r = hdev->ops->xmit(hdev, skb);
+               r = nfc_llc_xmit_from_hci(hdev->llc, skb);
                if (r < 0) {
                        kfree_skb(skb);
                        skb_queue_purge(&msg->msg_frags);
                        if (msg->cb)
-                               msg->cb(hdev, r, NULL, msg->cb_context);
+                               msg->cb(msg->cb_context, NULL, r);
                        kfree(msg);
                        break;
                }
@@ -133,15 +133,15 @@ static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
        del_timer_sync(&hdev->cmd_timer);
 
        if (hdev->cmd_pending_msg->cb)
-               hdev->cmd_pending_msg->cb(hdev, err, skb,
-                                         hdev->cmd_pending_msg->cb_context);
+               hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context,
+                                         skb, err);
        else
                kfree_skb(skb);
 
        kfree(hdev->cmd_pending_msg);
        hdev->cmd_pending_msg = NULL;
 
-       queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+       queue_work(system_nrt_wq, &hdev->msg_tx_work);
 }
 
 void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
@@ -326,7 +326,7 @@ static void nfc_hci_cmd_timeout(unsigned long data)
 {
        struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;
 
-       queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+       queue_work(system_nrt_wq, &hdev->msg_tx_work);
 }
 
 static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
@@ -398,8 +398,7 @@ disconnect_all:
        nfc_hci_disconnect_all_gates(hdev);
 
 exit:
-       if (skb)
-               kfree_skb(skb);
+       kfree_skb(skb);
 
        return r;
 }
@@ -470,29 +469,38 @@ static int hci_dev_up(struct nfc_dev *nfc_dev)
                        return r;
        }
 
+       r = nfc_llc_start(hdev->llc);
+       if (r < 0)
+               goto exit_close;
+
        r = hci_dev_session_init(hdev);
        if (r < 0)
-               goto exit;
+               goto exit_llc;
 
        r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
                               NFC_HCI_EVT_END_OPERATION, NULL, 0);
        if (r < 0)
-               goto exit;
+               goto exit_llc;
 
        if (hdev->ops->hci_ready) {
                r = hdev->ops->hci_ready(hdev);
                if (r < 0)
-                       goto exit;
+                       goto exit_llc;
        }
 
        r = hci_dev_version(hdev);
        if (r < 0)
-               goto exit;
+               goto exit_llc;
+
+       return 0;
+
+exit_llc:
+       nfc_llc_stop(hdev->llc);
+
+exit_close:
+       if (hdev->ops->close)
+               hdev->ops->close(hdev);
 
-exit:
-       if (r < 0)
-               if (hdev->ops->close)
-                       hdev->ops->close(hdev);
        return r;
 }
 
@@ -500,6 +508,8 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
+       nfc_llc_stop(hdev->llc);
+
        if (hdev->ops->close)
                hdev->ops->close(hdev);
 
@@ -539,13 +549,37 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
 {
 }
 
+#define HCI_CB_TYPE_TRANSCEIVE 1
+
+static void hci_transceive_cb(void *context, struct sk_buff *skb, int err)
+{
+       struct nfc_hci_dev *hdev = context;
+
+       switch (hdev->async_cb_type) {
+       case HCI_CB_TYPE_TRANSCEIVE:
+               /*
+                * TODO: Check RF Error indicator to make sure data is valid.
+                * It seems that HCI cmd can complete without error, but data
+                * can be invalid if an RF error occurred? Ignore for now.
+                */
+               if (err == 0)
+                       skb_trim(skb, skb->len - 1); /* RF Err ind */
+
+               hdev->async_cb(hdev->async_cb_context, skb, err);
+               break;
+       default:
+               if (err == 0)
+                       kfree_skb(skb);
+               break;
+       }
+}
+
 static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
                          struct sk_buff *skb, data_exchange_cb_t cb,
                          void *cb_context)
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
        int r;
-       struct sk_buff *res_skb = NULL;
 
        pr_debug("target_idx=%d\n", target->idx);
 
@@ -553,40 +587,37 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
        case NFC_HCI_RF_READER_A_GATE:
        case NFC_HCI_RF_READER_B_GATE:
                if (hdev->ops->data_exchange) {
-                       r = hdev->ops->data_exchange(hdev, target, skb,
-                                                    &res_skb);
+                       r = hdev->ops->data_exchange(hdev, target, skb, cb,
+                                                    cb_context);
                        if (r <= 0)     /* handled */
                                break;
                }
 
                *skb_push(skb, 1) = 0;  /* CTR, see spec:10.2.2.1 */
-               r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                    NFC_HCI_WR_XCHG_DATA,
-                                    skb->data, skb->len, &res_skb);
-               /*
-                * TODO: Check RF Error indicator to make sure data is valid.
-                * It seems that HCI cmd can complete without error, but data
-                * can be invalid if an RF error occured? Ignore for now.
-                */
-               if (r == 0)
-                       skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
+
+               hdev->async_cb_type = HCI_CB_TYPE_TRANSCEIVE;
+               hdev->async_cb = cb;
+               hdev->async_cb_context = cb_context;
+
+               r = nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                          NFC_HCI_WR_XCHG_DATA, skb->data,
+                                          skb->len, hci_transceive_cb, hdev);
                break;
        default:
                if (hdev->ops->data_exchange) {
-                       r = hdev->ops->data_exchange(hdev, target, skb,
-                                                    &res_skb);
+                       r = hdev->ops->data_exchange(hdev, target, skb, cb,
+                                                    cb_context);
                        if (r == 1)
                                r = -ENOTSUPP;
                }
                else
                        r = -ENOTSUPP;
+               break;
        }
 
        kfree_skb(skb);
 
-       cb(cb_context, res_skb, r);
-
-       return 0;
+       return r;
 }
 
 static int hci_check_presence(struct nfc_dev *nfc_dev,
@@ -600,6 +631,93 @@ static int hci_check_presence(struct nfc_dev *nfc_dev,
        return 0;
 }
 
+static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
+{
+       mutex_lock(&hdev->msg_tx_mutex);
+
+       if (hdev->cmd_pending_msg == NULL) {
+               nfc_driver_failure(hdev->ndev, err);
+               goto exit;
+       }
+
+       __nfc_hci_cmd_completion(hdev, err, NULL);
+
+exit:
+       mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+static void nfc_hci_llc_failure(struct nfc_hci_dev *hdev, int err)
+{
+       nfc_hci_failure(hdev, err);
+}
+
+static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hcp_packet *packet;
+       u8 type;
+       u8 instruction;
+       struct sk_buff *hcp_skb;
+       u8 pipe;
+       struct sk_buff *frag_skb;
+       int msg_len;
+
+       packet = (struct hcp_packet *)skb->data;
+       if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
+               skb_queue_tail(&hdev->rx_hcp_frags, skb);
+               return;
+       }
+
+       /* it's the last fragment. Does it need re-aggregation? */
+       if (skb_queue_len(&hdev->rx_hcp_frags)) {
+               pipe = packet->header & NFC_HCI_FRAGMENT;
+               skb_queue_tail(&hdev->rx_hcp_frags, skb);
+
+               msg_len = 0;
+               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+                       msg_len += (frag_skb->len -
+                                   NFC_HCI_HCP_PACKET_HEADER_LEN);
+               }
+
+               hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
+                                            msg_len, GFP_KERNEL);
+               if (hcp_skb == NULL) {
+                       nfc_hci_failure(hdev, -ENOMEM);
+                       return;
+               }
+
+               *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+
+               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+                       msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
+                       memcpy(skb_put(hcp_skb, msg_len),
+                              frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
+                              msg_len);
+               }
+
+               skb_queue_purge(&hdev->rx_hcp_frags);
+       } else {
+               packet->header &= NFC_HCI_FRAGMENT;
+               hcp_skb = skb;
+       }
+
+       /* if this is a response, dispatch immediately to
+        * unblock waiting cmd context. Otherwise, enqueue to dispatch
+        * in separate context where handler can also execute command.
+        */
+       packet = (struct hcp_packet *)hcp_skb->data;
+       type = HCP_MSG_GET_TYPE(packet->message.header);
+       if (type == NFC_HCI_HCP_RESPONSE) {
+               pipe = packet->header;
+               instruction = HCP_MSG_GET_CMD(packet->message.header);
+               skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
+                        NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+               nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
+       } else {
+               skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
+               queue_work(system_nrt_wq, &hdev->msg_rx_work);
+       }
+}
+
 static struct nfc_ops hci_nfc_ops = {
        .dev_up = hci_dev_up,
        .dev_down = hci_dev_down,
@@ -614,6 +732,7 @@ static struct nfc_ops hci_nfc_ops = {
 struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                                            struct nfc_hci_init_data *init_data,
                                            u32 protocols,
+                                           const char *llc_name,
                                            int tx_headroom,
                                            int tx_tailroom,
                                            int max_link_payload)
@@ -630,10 +749,19 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
        if (hdev == NULL)
                return NULL;
 
+       hdev->llc = nfc_llc_allocate(llc_name, hdev, ops->xmit,
+                                    nfc_hci_recv_from_llc, tx_headroom,
+                                    tx_tailroom, nfc_hci_llc_failure);
+       if (hdev->llc == NULL) {
+               kfree(hdev);
+               return NULL;
+       }
+
        hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
                                         tx_headroom + HCI_CMDS_HEADROOM,
                                         tx_tailroom);
        if (!hdev->ndev) {
+               nfc_llc_free(hdev->llc);
                kfree(hdev);
                return NULL;
        }
@@ -653,29 +781,18 @@ EXPORT_SYMBOL(nfc_hci_allocate_device);
 void nfc_hci_free_device(struct nfc_hci_dev *hdev)
 {
        nfc_free_device(hdev->ndev);
+       nfc_llc_free(hdev->llc);
        kfree(hdev);
 }
 EXPORT_SYMBOL(nfc_hci_free_device);
 
 int nfc_hci_register_device(struct nfc_hci_dev *hdev)
 {
-       struct device *dev = &hdev->ndev->dev;
-       const char *devname = dev_name(dev);
-       char name[32];
-       int r = 0;
-
        mutex_init(&hdev->msg_tx_mutex);
 
        INIT_LIST_HEAD(&hdev->msg_tx_queue);
 
        INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
-       snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
-       hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
-                                         WQ_MEM_RECLAIM, 1);
-       if (hdev->msg_tx_wq == NULL) {
-               r = -ENOMEM;
-               goto exit;
-       }
 
        init_timer(&hdev->cmd_timer);
        hdev->cmd_timer.data = (unsigned long)hdev;
@@ -684,27 +801,10 @@ int nfc_hci_register_device(struct nfc_hci_dev *hdev)
        skb_queue_head_init(&hdev->rx_hcp_frags);
 
        INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
-       snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
-       hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
-                                         WQ_MEM_RECLAIM, 1);
-       if (hdev->msg_rx_wq == NULL) {
-               r = -ENOMEM;
-               goto exit;
-       }
 
        skb_queue_head_init(&hdev->msg_rx_queue);
 
-       r = nfc_register_device(hdev->ndev);
-
-exit:
-       if (r < 0) {
-               if (hdev->msg_tx_wq)
-                       destroy_workqueue(hdev->msg_tx_wq);
-               if (hdev->msg_rx_wq)
-                       destroy_workqueue(hdev->msg_rx_wq);
-       }
-
-       return r;
+       return nfc_register_device(hdev->ndev);
 }
 EXPORT_SYMBOL(nfc_hci_register_device);
 
@@ -725,9 +825,8 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
 
        nfc_unregister_device(hdev->ndev);
 
-       destroy_workqueue(hdev->msg_tx_wq);
-
-       destroy_workqueue(hdev->msg_rx_wq);
+       cancel_work_sync(&hdev->msg_tx_work);
+       cancel_work_sync(&hdev->msg_rx_work);
 }
 EXPORT_SYMBOL(nfc_hci_unregister_device);
 
@@ -743,93 +842,30 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
 }
 EXPORT_SYMBOL(nfc_hci_get_clientdata);
 
-static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
-{
-       mutex_lock(&hdev->msg_tx_mutex);
-
-       if (hdev->cmd_pending_msg == NULL) {
-               nfc_driver_failure(hdev->ndev, err);
-               goto exit;
-       }
-
-       __nfc_hci_cmd_completion(hdev, err, NULL);
-
-exit:
-       mutex_unlock(&hdev->msg_tx_mutex);
-}
-
 void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
 {
        nfc_hci_failure(hdev, err);
 }
 EXPORT_SYMBOL(nfc_hci_driver_failure);
 
-void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hcp_packet *packet;
-       u8 type;
-       u8 instruction;
-       struct sk_buff *hcp_skb;
-       u8 pipe;
-       struct sk_buff *frag_skb;
-       int msg_len;
-
-       packet = (struct hcp_packet *)skb->data;
-       if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
-               skb_queue_tail(&hdev->rx_hcp_frags, skb);
-               return;
-       }
-
-       /* it's the last fragment. Does it need re-aggregation? */
-       if (skb_queue_len(&hdev->rx_hcp_frags)) {
-               pipe = packet->header & NFC_HCI_FRAGMENT;
-               skb_queue_tail(&hdev->rx_hcp_frags, skb);
-
-               msg_len = 0;
-               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
-                       msg_len += (frag_skb->len -
-                                   NFC_HCI_HCP_PACKET_HEADER_LEN);
-               }
-
-               hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
-                                            msg_len, GFP_KERNEL);
-               if (hcp_skb == NULL) {
-                       nfc_hci_failure(hdev, -ENOMEM);
-                       return;
-               }
-
-               *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
-
-               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
-                       msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
-                       memcpy(skb_put(hcp_skb, msg_len),
-                              frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
-                              msg_len);
-               }
+       nfc_llc_rcv_from_drv(hdev->llc, skb);
+}
+EXPORT_SYMBOL(nfc_hci_recv_frame);
 
-               skb_queue_purge(&hdev->rx_hcp_frags);
-       } else {
-               packet->header &= NFC_HCI_FRAGMENT;
-               hcp_skb = skb;
-       }
+static int __init nfc_hci_init(void)
+{
+       return nfc_llc_init();
+}
 
-       /* if this is a response, dispatch immediately to
-        * unblock waiting cmd context. Otherwise, enqueue to dispatch
-        * in separate context where handler can also execute command.
-        */
-       packet = (struct hcp_packet *)hcp_skb->data;
-       type = HCP_MSG_GET_TYPE(packet->message.header);
-       if (type == NFC_HCI_HCP_RESPONSE) {
-               pipe = packet->header;
-               instruction = HCP_MSG_GET_CMD(packet->message.header);
-               skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
-                        NFC_HCI_HCP_MESSAGE_HEADER_LEN);
-               nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
-       } else {
-               skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
-               queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
-       }
+static void __exit nfc_hci_exit(void)
+{
+       nfc_llc_exit();
 }
-EXPORT_SYMBOL(nfc_hci_recv_frame);
+
+subsys_initcall(nfc_hci_init);
+module_exit(nfc_hci_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFC HCI Core");
index fa9a21e922396bd396dc11a3b9dd916282b39ef9..b274d12c18ac5d7b5ac3a6aa4e4fc943738d6eea 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __LOCAL_HCI_H
 #define __LOCAL_HCI_H
 
+#include <net/nfc/hci.h>
+
 struct gate_pipe_map {
        u8 gate;
        u8 pipe;
@@ -35,15 +37,6 @@ struct hcp_packet {
        struct hcp_message message;
 } __packed;
 
-/*
- * HCI command execution completion callback.
- * result will be a standard linux error (may be converted from HCI response)
- * skb contains the response data and must be disposed, or may be NULL if
- * an error occured
- */
-typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, int result,
-                             struct sk_buff *skb, void *cb_data);
-
 struct hcp_exec_waiter {
        wait_queue_head_t *wq;
        bool exec_complete;
@@ -55,7 +48,7 @@ struct hci_msg {
        struct list_head msg_l;
        struct sk_buff_head msg_frags;
        bool wait_response;
-       hci_cmd_cb_t cb;
+       data_exchange_cb_t cb;
        void *cb_context;
        unsigned long completion_delay;
 };
@@ -83,7 +76,7 @@ struct hci_create_pipe_resp {
 int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
                           u8 type, u8 instruction,
                           const u8 *payload, size_t payload_len,
-                          hci_cmd_cb_t cb, void *cb_data,
+                          data_exchange_cb_t cb, void *cb_context,
                           unsigned long completion_delay);
 
 u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
index f4dad1a8974078864fd43f650e52598a6b46c566..208eedd07ee39d1e56aa3d919362979cf71d3bbe 100644 (file)
@@ -35,7 +35,7 @@
 int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
                           u8 type, u8 instruction,
                           const u8 *payload, size_t payload_len,
-                          hci_cmd_cb_t cb, void *cb_data,
+                          data_exchange_cb_t cb, void *cb_context,
                           unsigned long completion_delay)
 {
        struct nfc_dev *ndev = hdev->ndev;
@@ -52,7 +52,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
        skb_queue_head_init(&cmd->msg_frags);
        cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false;
        cmd->cb = cb;
-       cmd->cb_context = cb_data;
+       cmd->cb_context = cb_context;
        cmd->completion_delay = completion_delay;
 
        hci_len = payload_len + 1;
@@ -108,7 +108,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
        list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
        mutex_unlock(&hdev->msg_tx_mutex);
 
-       queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+       queue_work(system_nrt_wq, &hdev->msg_tx_work);
 
        return 0;
 
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
new file mode 100644 (file)
index 0000000..ae1205d
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Link Layer Control manager
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <net/nfc/llc.h>
+
+#include "llc.h"
+
+static struct list_head llc_engines;
+
+int nfc_llc_init(void)
+{
+       int r;
+
+       INIT_LIST_HEAD(&llc_engines);
+
+       r = nfc_llc_nop_register();
+       if (r)
+               goto exit;
+
+       r = nfc_llc_shdlc_register();
+       if (r)
+               goto exit;
+
+       return 0;
+
+exit:
+       nfc_llc_exit();
+       return r;
+}
+
+void nfc_llc_exit(void)
+{
+       struct nfc_llc_engine *llc_engine, *n;
+
+       list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
+               list_del(&llc_engine->entry);
+               kfree(llc_engine->name);
+               kfree(llc_engine);
+       }
+}
+
+int nfc_llc_register(const char *name, struct nfc_llc_ops *ops)
+{
+       struct nfc_llc_engine *llc_engine;
+
+       llc_engine = kzalloc(sizeof(struct nfc_llc_engine), GFP_KERNEL);
+       if (llc_engine == NULL)
+               return -ENOMEM;
+
+       llc_engine->name = kstrdup(name, GFP_KERNEL);
+       if (llc_engine->name == NULL) {
+               kfree(llc_engine);
+               return -ENOMEM;
+       }
+       llc_engine->ops = ops;
+
+       INIT_LIST_HEAD(&llc_engine->entry);
+       list_add_tail(&llc_engine->entry, &llc_engines);
+
+       return 0;
+}
+
+static struct nfc_llc_engine *nfc_llc_name_to_engine(const char *name)
+{
+       struct nfc_llc_engine *llc_engine;
+
+       list_for_each_entry(llc_engine, &llc_engines, entry) {
+               if (strcmp(llc_engine->name, name) == 0)
+                       return llc_engine;
+       }
+
+       return NULL;
+}
+
+void nfc_llc_unregister(const char *name)
+{
+       struct nfc_llc_engine *llc_engine;
+
+       llc_engine = nfc_llc_name_to_engine(name);
+       if (llc_engine == NULL)
+               return;
+
+       list_del(&llc_engine->entry);
+       kfree(llc_engine->name);
+       kfree(llc_engine);
+}
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+                                xmit_to_drv_t xmit_to_drv,
+                                rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                                int tx_tailroom, llc_failure_t llc_failure)
+{
+       struct nfc_llc_engine *llc_engine;
+       struct nfc_llc *llc;
+
+       llc_engine = nfc_llc_name_to_engine(name);
+       if (llc_engine == NULL)
+               return NULL;
+
+       llc = kzalloc(sizeof(struct nfc_llc), GFP_KERNEL);
+       if (llc == NULL)
+               return NULL;
+
+       llc->data = llc_engine->ops->init(hdev, xmit_to_drv, rcv_to_hci,
+                                         tx_headroom, tx_tailroom,
+                                         &llc->rx_headroom, &llc->rx_tailroom,
+                                         llc_failure);
+       if (llc->data == NULL) {
+               kfree(llc);
+               return NULL;
+       }
+       llc->ops = llc_engine->ops;
+
+       return llc;
+}
+
+void nfc_llc_free(struct nfc_llc *llc)
+{
+       llc->ops->deinit(llc);
+       kfree(llc);
+}
+
+inline void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
+                                         int *rx_tailroom)
+{
+       *rx_headroom = llc->rx_headroom;
+       *rx_tailroom = llc->rx_tailroom;
+}
+
+inline int nfc_llc_start(struct nfc_llc *llc)
+{
+       return llc->ops->start(llc);
+}
+
+inline int nfc_llc_stop(struct nfc_llc *llc)
+{
+       return llc->ops->stop(llc);
+}
+
+inline void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       llc->ops->rcv_from_drv(llc, skb);
+}
+
+inline int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       return llc->ops->xmit_from_hci(llc, skb);
+}
+
+inline void *nfc_llc_get_data(struct nfc_llc *llc)
+{
+       return llc->data;
+}
diff --git a/net/nfc/hci/llc.h b/net/nfc/hci/llc.h
new file mode 100644 (file)
index 0000000..7be0b7f
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Link Layer Control manager
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_LLC_H_
+#define __LOCAL_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+#include <linux/skbuff.h>
+
+struct nfc_llc_ops {
+       void *(*init) (struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+                      rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                      int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+                      llc_failure_t llc_failure);
+       void (*deinit) (struct nfc_llc *llc);
+       int (*start) (struct nfc_llc *llc);
+       int (*stop) (struct nfc_llc *llc);
+       void (*rcv_from_drv) (struct nfc_llc *llc, struct sk_buff *skb);
+       int (*xmit_from_hci) (struct nfc_llc *llc, struct sk_buff *skb);
+};
+
+struct nfc_llc_engine {
+       const char *name;
+       struct nfc_llc_ops *ops;
+       struct list_head entry;
+};
+
+struct nfc_llc {
+       void *data;
+       struct nfc_llc_ops *ops;
+       int rx_headroom;
+       int rx_tailroom;
+};
+
+void *nfc_llc_get_data(struct nfc_llc *llc);
+
+int nfc_llc_register(const char *name, struct nfc_llc_ops *ops);
+void nfc_llc_unregister(const char *name);
+
+int nfc_llc_nop_register(void);
+
+#if defined(CONFIG_NFC_SHDLC)
+int nfc_llc_shdlc_register(void);
+#else
+static inline int nfc_llc_shdlc_register(void)
+{
+       return 0;
+}
+#endif
+
+#endif /* __LOCAL_LLC_H_ */
diff --git a/net/nfc/hci/llc_nop.c b/net/nfc/hci/llc_nop.c
new file mode 100644 (file)
index 0000000..87b1029
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * nop (passthrough) Link Layer Control
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+
+#include "llc.h"
+
+struct llc_nop {
+       struct nfc_hci_dev *hdev;
+       xmit_to_drv_t xmit_to_drv;
+       rcv_to_hci_t rcv_to_hci;
+       int tx_headroom;
+       int tx_tailroom;
+       llc_failure_t llc_failure;
+};
+
+static void *llc_nop_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+                         rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                         int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+                         llc_failure_t llc_failure)
+{
+       struct llc_nop *llc_nop;
+
+       *rx_headroom = 0;
+       *rx_tailroom = 0;
+
+       llc_nop = kzalloc(sizeof(struct llc_nop), GFP_KERNEL);
+       if (llc_nop == NULL)
+               return NULL;
+
+       llc_nop->hdev = hdev;
+       llc_nop->xmit_to_drv = xmit_to_drv;
+       llc_nop->rcv_to_hci = rcv_to_hci;
+       llc_nop->tx_headroom = tx_headroom;
+       llc_nop->tx_tailroom = tx_tailroom;
+       llc_nop->llc_failure = llc_failure;
+
+       return llc_nop;
+}
+
+static void llc_nop_deinit(struct nfc_llc *llc)
+{
+       kfree(nfc_llc_get_data(llc));
+}
+
+static int llc_nop_start(struct nfc_llc *llc)
+{
+       return 0;
+}
+
+static int llc_nop_stop(struct nfc_llc *llc)
+{
+       return 0;
+}
+
+static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       struct llc_nop *llc_nop = nfc_llc_get_data(llc);
+
+       llc_nop->rcv_to_hci(llc_nop->hdev, skb);
+}
+
+static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       struct llc_nop *llc_nop = nfc_llc_get_data(llc);
+
+       return llc_nop->xmit_to_drv(llc_nop->hdev, skb);
+}
+
+static struct nfc_llc_ops llc_nop_ops = {
+       .init = llc_nop_init,
+       .deinit = llc_nop_deinit,
+       .start = llc_nop_start,
+       .stop = llc_nop_stop,
+       .rcv_from_drv = llc_nop_rcv_from_drv,
+       .xmit_from_hci = llc_nop_xmit_from_hci,
+};
+
+int nfc_llc_nop_register(void)
+{
+       return nfc_llc_register(LLC_NOP_NAME, &llc_nop_ops);
+}
similarity index 54%
rename from net/nfc/hci/shdlc.c
rename to net/nfc/hci/llc_shdlc.c
index 6f840c18c892e351adc19f30e3a16513751d4f77..8f69d791dcb38f20a386f36654298b4f92c4a275 100644 (file)
@@ -1,10 +1,11 @@
 /*
+ * shdlc Link Layer Control
+ *
  * Copyright (C) 2012  Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 
 #define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
 
+#include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/export.h>
 #include <linux/wait.h>
-#include <linux/crc-ccitt.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
 
-#include <net/nfc/hci.h>
-#include <net/nfc/shdlc.h>
+#include "llc.h"
+
+enum shdlc_state {
+       SHDLC_DISCONNECTED = 0,
+       SHDLC_CONNECTING = 1,
+       SHDLC_NEGOTIATING = 2,
+       SHDLC_HALF_CONNECTED = 3,
+       SHDLC_CONNECTED = 4
+};
+
+struct llc_shdlc {
+       struct nfc_hci_dev *hdev;
+       xmit_to_drv_t xmit_to_drv;
+       rcv_to_hci_t rcv_to_hci;
+
+       struct mutex state_mutex;
+       enum shdlc_state state;
+       int hard_fault;
+
+       wait_queue_head_t *connect_wq;
+       int connect_tries;
+       int connect_result;
+       struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
+
+       u8 w;                           /* window size */
+       bool srej_support;
+
+       struct timer_list t1_timer;     /* send ack timeout */
+       bool t1_active;
+
+       struct timer_list t2_timer;     /* guard/retransmit timeout */
+       bool t2_active;
+
+       int ns;                         /* next seq num for send */
+       int nr;                         /* next expected seq num for receive */
+       int dnr;                        /* oldest sent unacked seq num */
+
+       struct sk_buff_head rcv_q;
+
+       struct sk_buff_head send_q;
+       bool rnr;                       /* other side is not ready to receive */
+
+       struct sk_buff_head ack_pending_q;
+
+       struct work_struct sm_work;
+
+       int tx_headroom;
+       int tx_tailroom;
+
+       llc_failure_t llc_failure;
+};
 
 #define SHDLC_LLC_HEAD_ROOM    2
-#define SHDLC_LLC_TAIL_ROOM    2
 
 #define SHDLC_MAX_WINDOW       4
 #define SHDLC_SREJ_SUPPORT     false
@@ -71,7 +119,7 @@ do {                                                           \
 } while (0)
 
 /* checks x < y <= z modulo 8 */
-static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
+static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
 {
        if (x < z)
                return ((x < y) && (y <= z)) ? true : false;
@@ -80,7 +128,7 @@ static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
 }
 
 /* checks x <= y < z modulo 8 */
-static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
+static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
 {
        if (x <= z)
                return ((x <= y) && (y < z)) ? true : false;
@@ -88,36 +136,21 @@ static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
                return ((y >= x) || (y < z)) ? true : false;
 }
 
-static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc,
+static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
                                           int payload_len)
 {
        struct sk_buff *skb;
 
-       skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM +
-                       shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM +
-                       payload_len, GFP_KERNEL);
+       skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
+                       shdlc->tx_tailroom + payload_len, GFP_KERNEL);
        if (skb)
-               skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM);
+               skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
 
        return skb;
 }
 
-static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
-{
-       u16 crc;
-       int len;
-
-       len = skb->len + 2;
-       *skb_push(skb, 1) = len;
-
-       crc = crc_ccitt(0xffff, skb->data, skb->len);
-       crc = ~crc;
-       *skb_put(skb, 1) = crc & 0xff;
-       *skb_put(skb, 1) = crc >> 8;
-}
-
 /* immediately sends an S frame. */
-static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
+static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
                                  enum sframe_type sframe_type, int nr)
 {
        int r;
@@ -125,15 +158,13 @@ static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
 
        pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
 
-       skb = nfc_shdlc_alloc_skb(shdlc, 0);
+       skb = llc_shdlc_alloc_skb(shdlc, 0);
        if (skb == NULL)
                return -ENOMEM;
 
        *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
 
-       nfc_shdlc_add_len_crc(skb);
-
-       r = shdlc->ops->xmit(shdlc, skb);
+       r = shdlc->xmit_to_drv(shdlc->hdev, skb);
 
        kfree_skb(skb);
 
@@ -141,7 +172,7 @@ static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
 }
 
 /* immediately sends an U frame. skb may contain optional payload */
-static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
+static int llc_shdlc_send_u_frame(struct llc_shdlc *shdlc,
                                  struct sk_buff *skb,
                                  enum uframe_modifier uframe_modifier)
 {
@@ -151,9 +182,7 @@ static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
 
        *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
 
-       nfc_shdlc_add_len_crc(skb);
-
-       r = shdlc->ops->xmit(shdlc, skb);
+       r = shdlc->xmit_to_drv(shdlc->hdev, skb);
 
        kfree_skb(skb);
 
@@ -164,7 +193,7 @@ static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
  * Free ack_pending frames until y_nr - 1, and reset t2 according to
  * the remaining oldest ack_pending frame sent time
  */
-static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
+static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
 {
        struct sk_buff *skb;
        int dnr = shdlc->dnr;   /* MUST initially be < y_nr */
@@ -204,7 +233,7 @@ static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
  * Receive validated frames from lower layer. skb contains HCI payload only.
  * Handle according to algorithm at spec:10.8.2
  */
-static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
+static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
                                  struct sk_buff *skb, int ns, int nr)
 {
        int x_ns = ns;
@@ -216,66 +245,64 @@ static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
                goto exit;
 
        if (x_ns != shdlc->nr) {
-               nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
+               llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
                goto exit;
        }
 
        if (shdlc->t1_active == false) {
                shdlc->t1_active = true;
-               mod_timer(&shdlc->t1_timer,
+               mod_timer(&shdlc->t1_timer, jiffies +
                          msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
                pr_debug("(re)Start T1(send ack)\n");
        }
 
        if (skb->len) {
-               nfc_hci_recv_frame(shdlc->hdev, skb);
+               shdlc->rcv_to_hci(shdlc->hdev, skb);
                skb = NULL;
        }
 
        shdlc->nr = (shdlc->nr + 1) % 8;
 
-       if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
-               nfc_shdlc_reset_t2(shdlc, y_nr);
+       if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+               llc_shdlc_reset_t2(shdlc, y_nr);
 
                shdlc->dnr = y_nr;
        }
 
 exit:
-       if (skb)
-               kfree_skb(skb);
+       kfree_skb(skb);
 }
 
-static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr)
+static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
 {
        pr_debug("remote acked up to frame %d excluded\n", y_nr);
 
-       if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
-               nfc_shdlc_reset_t2(shdlc, y_nr);
+       if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+               llc_shdlc_reset_t2(shdlc, y_nr);
                shdlc->dnr = y_nr;
        }
 }
 
-static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc)
+static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
 {
        struct sk_buff *skb;
 
        pr_debug("ns reset to %d\n", shdlc->dnr);
 
        while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
-               skb_pull(skb, 2);       /* remove len+control */
-               skb_trim(skb, skb->len - 2);    /* remove crc */
+               skb_pull(skb, 1);       /* remove control field */
                skb_queue_head(&shdlc->send_q, skb);
        }
        shdlc->ns = shdlc->dnr;
 }
 
-static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
+static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
 {
        struct sk_buff *skb;
 
        pr_debug("remote asks retransmition from frame %d\n", y_nr);
 
-       if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
+       if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
                if (shdlc->t2_active) {
                        del_timer_sync(&shdlc->t2_timer);
                        shdlc->t2_active = false;
@@ -289,12 +316,12 @@ static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
                        }
                }
 
-               nfc_shdlc_requeue_ack_pending(shdlc);
+               llc_shdlc_requeue_ack_pending(shdlc);
        }
 }
 
 /* See spec RR:10.8.3 REJ:10.8.4 */
-static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
+static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
                                  enum sframe_type s_frame_type, int nr)
 {
        struct sk_buff *skb;
@@ -304,21 +331,21 @@ static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
 
        switch (s_frame_type) {
        case S_FRAME_RR:
-               nfc_shdlc_rcv_ack(shdlc, nr);
+               llc_shdlc_rcv_ack(shdlc, nr);
                if (shdlc->rnr == true) {       /* see SHDLC 10.7.7 */
                        shdlc->rnr = false;
                        if (shdlc->send_q.qlen == 0) {
-                               skb = nfc_shdlc_alloc_skb(shdlc, 0);
+                               skb = llc_shdlc_alloc_skb(shdlc, 0);
                                if (skb)
                                        skb_queue_tail(&shdlc->send_q, skb);
                        }
                }
                break;
        case S_FRAME_REJ:
-               nfc_shdlc_rcv_rej(shdlc, nr);
+               llc_shdlc_rcv_rej(shdlc, nr);
                break;
        case S_FRAME_RNR:
-               nfc_shdlc_rcv_ack(shdlc, nr);
+               llc_shdlc_rcv_ack(shdlc, nr);
                shdlc->rnr = true;
                break;
        default:
@@ -326,7 +353,7 @@ static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
        }
 }
 
-static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
+static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
 {
        pr_debug("result=%d\n", r);
 
@@ -337,7 +364,7 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
                shdlc->nr = 0;
                shdlc->dnr = 0;
 
-               shdlc->state = SHDLC_CONNECTED;
+               shdlc->state = SHDLC_HALF_CONNECTED;
        } else {
                shdlc->state = SHDLC_DISCONNECTED;
        }
@@ -347,36 +374,36 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
        wake_up(shdlc->connect_wq);
 }
 
-static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc)
+static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
 {
        struct sk_buff *skb;
 
        pr_debug("\n");
 
-       skb = nfc_shdlc_alloc_skb(shdlc, 2);
+       skb = llc_shdlc_alloc_skb(shdlc, 2);
        if (skb == NULL)
                return -ENOMEM;
 
        *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
        *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
 
-       return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
+       return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
 }
 
-static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc)
+static int llc_shdlc_connect_send_ua(struct llc_shdlc *shdlc)
 {
        struct sk_buff *skb;
 
        pr_debug("\n");
 
-       skb = nfc_shdlc_alloc_skb(shdlc, 0);
+       skb = llc_shdlc_alloc_skb(shdlc, 0);
        if (skb == NULL)
                return -ENOMEM;
 
-       return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
+       return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
 }
 
-static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
+static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
                                  struct sk_buff *skb,
                                  enum uframe_modifier u_frame_modifier)
 {
@@ -388,8 +415,13 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
 
        switch (u_frame_modifier) {
        case U_FRAME_RSET:
-               if (shdlc->state == SHDLC_NEGOCIATING) {
-                       /* we sent RSET, but chip wants to negociate */
+               switch (shdlc->state) {
+               case SHDLC_NEGOTIATING:
+               case SHDLC_CONNECTING:
+                       /*
+                        * We sent RSET, but chip wants to negotiate or we
+                        * got RSET before we managed to send out ours.
+                        */
                        if (skb->len > 0)
                                w = skb->data[0];
 
@@ -401,22 +433,34 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
                            (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
                                shdlc->w = w;
                                shdlc->srej_support = srej_support;
-                               r = nfc_shdlc_connect_send_ua(shdlc);
-                               nfc_shdlc_connect_complete(shdlc, r);
+                               r = llc_shdlc_connect_send_ua(shdlc);
+                               llc_shdlc_connect_complete(shdlc, r);
                        }
-               } else if (shdlc->state == SHDLC_CONNECTED) {
+                       break;
+               case SHDLC_HALF_CONNECTED:
+                       /*
+                        * Chip resent RSET due to its timeout - Ignore it
+                        * as we already sent UA.
+                        */
+                       break;
+               case SHDLC_CONNECTED:
                        /*
                         * Chip wants to reset link. This is unexpected and
                         * unsupported.
                         */
                        shdlc->hard_fault = -ECONNRESET;
+                       break;
+               default:
+                       break;
                }
                break;
        case U_FRAME_UA:
                if ((shdlc->state == SHDLC_CONNECTING &&
                     shdlc->connect_tries > 0) ||
-                   (shdlc->state == SHDLC_NEGOCIATING))
-                       nfc_shdlc_connect_complete(shdlc, 0);
+                   (shdlc->state == SHDLC_NEGOTIATING)) {
+                       llc_shdlc_connect_complete(shdlc, 0);
+                       shdlc->state = SHDLC_CONNECTED;
+               }
                break;
        default:
                break;
@@ -425,7 +469,7 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
        kfree_skb(skb);
 }
 
-static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
+static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
 {
        struct sk_buff *skb;
        u8 control;
@@ -443,19 +487,25 @@ static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
                switch (control & SHDLC_CONTROL_HEAD_MASK) {
                case SHDLC_CONTROL_HEAD_I:
                case SHDLC_CONTROL_HEAD_I2:
+                       if (shdlc->state == SHDLC_HALF_CONNECTED)
+                               shdlc->state = SHDLC_CONNECTED;
+
                        ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
                        nr = control & SHDLC_CONTROL_NR_MASK;
-                       nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
+                       llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
                        break;
                case SHDLC_CONTROL_HEAD_S:
+                       if (shdlc->state == SHDLC_HALF_CONNECTED)
+                               shdlc->state = SHDLC_CONNECTED;
+
                        s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
                        nr = control & SHDLC_CONTROL_NR_MASK;
-                       nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
+                       llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
                        kfree_skb(skb);
                        break;
                case SHDLC_CONTROL_HEAD_U:
                        u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
-                       nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
+                       llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
                        break;
                default:
                        pr_err("UNKNOWN Control=%d\n", control);
@@ -465,7 +515,7 @@ static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
        }
 }
 
-static int nfc_shdlc_w_used(int ns, int dnr)
+static int llc_shdlc_w_used(int ns, int dnr)
 {
        int unack_count;
 
@@ -478,7 +528,7 @@ static int nfc_shdlc_w_used(int ns, int dnr)
 }
 
 /* Send frames according to algorithm at spec:10.8.1 */
-static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
+static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
 {
        struct sk_buff *skb;
        int r;
@@ -489,7 +539,7 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
                    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
                     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
                     shdlc->rnr == false ? "false" : "true",
-                    shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr),
+                    shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
                     shdlc->ack_pending_q.qlen);
 
        while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
@@ -508,11 +558,9 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
 
                pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
                         shdlc->nr);
-       /*      SHDLC_DUMP_SKB("shdlc frame written", skb); */
-
-               nfc_shdlc_add_len_crc(skb);
+               SHDLC_DUMP_SKB("shdlc frame written", skb);
 
-               r = shdlc->ops->xmit(shdlc, skb);
+               r = shdlc->xmit_to_drv(shdlc->hdev, skb);
                if (r < 0) {
                        shdlc->hard_fault = r;
                        break;
@@ -534,36 +582,36 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
        }
 }
 
-static void nfc_shdlc_connect_timeout(unsigned long data)
+static void llc_shdlc_connect_timeout(unsigned long data)
 {
-       struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+       struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
 
        pr_debug("\n");
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
+       queue_work(system_nrt_wq, &shdlc->sm_work);
 }
 
-static void nfc_shdlc_t1_timeout(unsigned long data)
+static void llc_shdlc_t1_timeout(unsigned long data)
 {
-       struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+       struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
 
        pr_debug("SoftIRQ: need to send ack\n");
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
+       queue_work(system_nrt_wq, &shdlc->sm_work);
 }
 
-static void nfc_shdlc_t2_timeout(unsigned long data)
+static void llc_shdlc_t2_timeout(unsigned long data)
 {
-       struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+       struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
 
        pr_debug("SoftIRQ: need to retransmit\n");
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
+       queue_work(system_nrt_wq, &shdlc->sm_work);
 }
 
-static void nfc_shdlc_sm_work(struct work_struct *work)
+static void llc_shdlc_sm_work(struct work_struct *work)
 {
-       struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work);
+       struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
        int r;
 
        pr_debug("\n");
@@ -578,46 +626,47 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
                break;
        case SHDLC_CONNECTING:
                if (shdlc->hard_fault) {
-                       nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+                       llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
                        break;
                }
 
                if (shdlc->connect_tries++ < 5)
-                       r = nfc_shdlc_connect_initiate(shdlc);
+                       r = llc_shdlc_connect_initiate(shdlc);
                else
                        r = -ETIME;
                if (r < 0)
-                       nfc_shdlc_connect_complete(shdlc, r);
+                       llc_shdlc_connect_complete(shdlc, r);
                else {
                        mod_timer(&shdlc->connect_timer, jiffies +
                                  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
 
-                       shdlc->state = SHDLC_NEGOCIATING;
+                       shdlc->state = SHDLC_NEGOTIATING;
                }
                break;
-       case SHDLC_NEGOCIATING:
+       case SHDLC_NEGOTIATING:
                if (timer_pending(&shdlc->connect_timer) == 0) {
                        shdlc->state = SHDLC_CONNECTING;
-                       queue_work(shdlc->sm_wq, &shdlc->sm_work);
+                       queue_work(system_nrt_wq, &shdlc->sm_work);
                }
 
-               nfc_shdlc_handle_rcv_queue(shdlc);
+               llc_shdlc_handle_rcv_queue(shdlc);
 
                if (shdlc->hard_fault) {
-                       nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+                       llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
                        break;
                }
                break;
+       case SHDLC_HALF_CONNECTED:
        case SHDLC_CONNECTED:
-               nfc_shdlc_handle_rcv_queue(shdlc);
-               nfc_shdlc_handle_send_queue(shdlc);
+               llc_shdlc_handle_rcv_queue(shdlc);
+               llc_shdlc_handle_send_queue(shdlc);
 
                if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
                        pr_debug
                            ("Handle T1(send ack) elapsed (T1 now inactive)\n");
 
                        shdlc->t1_active = false;
-                       r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
+                       r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
                                                   shdlc->nr);
                        if (r < 0)
                                shdlc->hard_fault = r;
@@ -629,12 +678,12 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
 
                        shdlc->t2_active = false;
 
-                       nfc_shdlc_requeue_ack_pending(shdlc);
-                       nfc_shdlc_handle_send_queue(shdlc);
+                       llc_shdlc_requeue_ack_pending(shdlc);
+                       llc_shdlc_handle_send_queue(shdlc);
                }
 
                if (shdlc->hard_fault) {
-                       nfc_hci_driver_failure(shdlc->hdev, shdlc->hard_fault);
+                       shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
                }
                break;
        default:
@@ -647,7 +696,7 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
  * Called from syscall context to establish shdlc link. Sleeps until
  * link is ready or failure.
  */
-static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
+static int llc_shdlc_connect(struct llc_shdlc *shdlc)
 {
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
 
@@ -662,14 +711,14 @@ static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
 
        mutex_unlock(&shdlc->state_mutex);
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
+       queue_work(system_nrt_wq, &shdlc->sm_work);
 
        wait_event(connect_wq, shdlc->connect_result != 1);
 
        return shdlc->connect_result;
 }
 
-static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
+static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
 {
        pr_debug("\n");
 
@@ -679,7 +728,7 @@ static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
 
        mutex_unlock(&shdlc->state_mutex);
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
+       queue_work(system_nrt_wq, &shdlc->sm_work);
 }
 
 /*
@@ -687,7 +736,7 @@ static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
  * skb contains only LLC header and payload.
  * If skb == NULL, it is a notification that the link below is dead.
  */
-void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
 {
        if (skb == NULL) {
                pr_err("NULL Frame -> link is dead\n");
@@ -697,176 +746,37 @@ void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
                skb_queue_tail(&shdlc->rcv_q, skb);
        }
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-}
-EXPORT_SYMBOL(nfc_shdlc_recv_frame);
-
-static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-       int r;
-
-       pr_debug("\n");
-
-       if (shdlc->ops->open) {
-               r = shdlc->ops->open(shdlc);
-               if (r < 0)
-                       return r;
-       }
-
-       r = nfc_shdlc_connect(shdlc);
-       if (r < 0 && shdlc->ops->close)
-               shdlc->ops->close(shdlc);
-
-       return r;
-}
-
-static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       pr_debug("\n");
-
-       nfc_shdlc_disconnect(shdlc);
-
-       if (shdlc->ops->close)
-               shdlc->ops->close(shdlc);
+       queue_work(system_nrt_wq, &shdlc->sm_work);
 }
 
-static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev)
+static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+                           rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                           int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+                           llc_failure_t llc_failure)
 {
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-       int r = 0;
-
-       pr_debug("\n");
+       struct llc_shdlc *shdlc;
 
-       if (shdlc->ops->hci_ready)
-               r = shdlc->ops->hci_ready(shdlc);
-
-       return r;
-}
-
-static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);
-
-       skb_queue_tail(&shdlc->send_q, skb);
+       *rx_headroom = SHDLC_LLC_HEAD_ROOM;
+       *rx_tailroom = 0;
 
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-
-       return 0;
-}
-
-static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
-                               u32 im_protocols, u32 tm_protocols)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       pr_debug("\n");
-
-       if (shdlc->ops->start_poll)
-               return shdlc->ops->start_poll(shdlc,
-                                             im_protocols, tm_protocols);
-
-       return 0;
-}
-
-static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
-                                     struct nfc_target *target)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       if (shdlc->ops->target_from_gate)
-               return shdlc->ops->target_from_gate(shdlc, gate, target);
-
-       return -EPERM;
-}
-
-static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
-                                               u8 gate,
-                                               struct nfc_target *target)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       pr_debug("\n");
-
-       if (shdlc->ops->complete_target_discovered)
-               return shdlc->ops->complete_target_discovered(shdlc, gate,
-                                                             target);
-
-       return 0;
-}
-
-static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
-                                  struct nfc_target *target,
-                                  struct sk_buff *skb,
-                                  struct sk_buff **res_skb)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       if (shdlc->ops->data_exchange)
-               return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
-
-       return -EPERM;
-}
-
-static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
-                                   struct nfc_target *target)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       if (shdlc->ops->check_presence)
-               return shdlc->ops->check_presence(shdlc, target);
-
-       return 0;
-}
-
-static struct nfc_hci_ops shdlc_ops = {
-       .open = nfc_shdlc_open,
-       .close = nfc_shdlc_close,
-       .hci_ready = nfc_shdlc_hci_ready,
-       .xmit = nfc_shdlc_xmit,
-       .start_poll = nfc_shdlc_start_poll,
-       .target_from_gate = nfc_shdlc_target_from_gate,
-       .complete_target_discovered = nfc_shdlc_complete_target_discovered,
-       .data_exchange = nfc_shdlc_data_exchange,
-       .check_presence = nfc_shdlc_check_presence,
-};
-
-struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
-                                    struct nfc_hci_init_data *init_data,
-                                    u32 protocols,
-                                    int tx_headroom, int tx_tailroom,
-                                    int max_link_payload, const char *devname)
-{
-       struct nfc_shdlc *shdlc;
-       int r;
-       char name[32];
-
-       if (ops->xmit == NULL)
-               return NULL;
-
-       shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
+       shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
        if (shdlc == NULL)
                return NULL;
 
        mutex_init(&shdlc->state_mutex);
-       shdlc->ops = ops;
        shdlc->state = SHDLC_DISCONNECTED;
 
        init_timer(&shdlc->connect_timer);
        shdlc->connect_timer.data = (unsigned long)shdlc;
-       shdlc->connect_timer.function = nfc_shdlc_connect_timeout;
+       shdlc->connect_timer.function = llc_shdlc_connect_timeout;
 
        init_timer(&shdlc->t1_timer);
        shdlc->t1_timer.data = (unsigned long)shdlc;
-       shdlc->t1_timer.function = nfc_shdlc_t1_timeout;
+       shdlc->t1_timer.function = llc_shdlc_t1_timeout;
 
        init_timer(&shdlc->t2_timer);
        shdlc->t2_timer.data = (unsigned long)shdlc;
-       shdlc->t2_timer.function = nfc_shdlc_t2_timeout;
+       shdlc->t2_timer.function = llc_shdlc_t2_timeout;
 
        shdlc->w = SHDLC_MAX_WINDOW;
        shdlc->srej_support = SHDLC_SREJ_SUPPORT;
@@ -875,77 +785,73 @@ struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
        skb_queue_head_init(&shdlc->send_q);
        skb_queue_head_init(&shdlc->ack_pending_q);
 
-       INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work);
-       snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
-       shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
-                                      WQ_MEM_RECLAIM, 1);
-       if (shdlc->sm_wq == NULL)
-               goto err_allocwq;
+       INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);
 
-       shdlc->client_headroom = tx_headroom;
-       shdlc->client_tailroom = tx_tailroom;
-
-       shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols,
-                                             tx_headroom + SHDLC_LLC_HEAD_ROOM,
-                                             tx_tailroom + SHDLC_LLC_TAIL_ROOM,
-                                             max_link_payload);
-       if (shdlc->hdev == NULL)
-               goto err_allocdev;
-
-       nfc_hci_set_clientdata(shdlc->hdev, shdlc);
-
-       r = nfc_hci_register_device(shdlc->hdev);
-       if (r < 0)
-               goto err_regdev;
+       shdlc->hdev = hdev;
+       shdlc->xmit_to_drv = xmit_to_drv;
+       shdlc->rcv_to_hci = rcv_to_hci;
+       shdlc->tx_headroom = tx_headroom;
+       shdlc->tx_tailroom = tx_tailroom;
+       shdlc->llc_failure = llc_failure;
 
        return shdlc;
+}
 
-err_regdev:
-       nfc_hci_free_device(shdlc->hdev);
+static void llc_shdlc_deinit(struct nfc_llc *llc)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
 
-err_allocdev:
-       destroy_workqueue(shdlc->sm_wq);
+       skb_queue_purge(&shdlc->rcv_q);
+       skb_queue_purge(&shdlc->send_q);
+       skb_queue_purge(&shdlc->ack_pending_q);
 
-err_allocwq:
        kfree(shdlc);
-
-       return NULL;
 }
-EXPORT_SYMBOL(nfc_shdlc_allocate);
 
-void nfc_shdlc_free(struct nfc_shdlc *shdlc)
+static int llc_shdlc_start(struct nfc_llc *llc)
 {
-       pr_debug("\n");
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
 
-       nfc_hci_unregister_device(shdlc->hdev);
-       nfc_hci_free_device(shdlc->hdev);
+       return llc_shdlc_connect(shdlc);
+}
 
-       destroy_workqueue(shdlc->sm_wq);
+static int llc_shdlc_stop(struct nfc_llc *llc)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
 
-       skb_queue_purge(&shdlc->rcv_q);
-       skb_queue_purge(&shdlc->send_q);
-       skb_queue_purge(&shdlc->ack_pending_q);
+       llc_shdlc_disconnect(shdlc);
 
-       kfree(shdlc);
+       return 0;
 }
-EXPORT_SYMBOL(nfc_shdlc_free);
 
-void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata)
+static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
 {
-       pr_debug("\n");
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
 
-       shdlc->clientdata = clientdata;
+       llc_shdlc_recv_frame(shdlc, skb);
 }
-EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
 
-void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc)
+static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
 {
-       return shdlc->clientdata;
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+       skb_queue_tail(&shdlc->send_q, skb);
+
+       queue_work(system_nrt_wq, &shdlc->sm_work);
+
+       return 0;
 }
-EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
 
-struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc)
+static struct nfc_llc_ops llc_shdlc_ops = {
+       .init = llc_shdlc_init,
+       .deinit = llc_shdlc_deinit,
+       .start = llc_shdlc_start,
+       .stop = llc_shdlc_stop,
+       .rcv_from_drv = llc_shdlc_rcv_from_drv,
+       .xmit_from_hci = llc_shdlc_xmit_from_hci,
+};
+
+int nfc_llc_shdlc_register(void)
 {
-       return shdlc->hdev;
+       return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops);
 }
-EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
index b982b5b890d73da30a315567851d5d91e7ec9285..c45ccd6c094c5b16f258be4ffd7a1cff4433f6dc 100644 (file)
@@ -312,6 +312,8 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
 
        skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
 
+       nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX);
+
        return nfc_data_exchange(dev, local->target_idx, skb,
                                 nfc_llcp_recv, local);
 }
index 82f0f7588b463d8ba0a8e1931124c03f94de29ed..c12c5ef3d036e468e3dc4905dfd00531a8406b64 100644 (file)
@@ -56,7 +56,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
        sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
                llcp_sock = nfc_llcp_sock(sk);
 
-               lock_sock(sk);
+               bh_lock_sock(sk);
 
                if (sk->sk_state == LLCP_CONNECTED)
                        nfc_put_device(llcp_sock->dev);
@@ -68,26 +68,26 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
                        list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
                                                 accept_queue) {
                                accept_sk = &lsk->sk;
-                               lock_sock(accept_sk);
+                               bh_lock_sock(accept_sk);
 
                                nfc_llcp_accept_unlink(accept_sk);
 
                                accept_sk->sk_state = LLCP_CLOSED;
 
-                               release_sock(accept_sk);
+                               bh_unlock_sock(accept_sk);
 
                                sock_orphan(accept_sk);
                        }
 
                        if (listen == true) {
-                               release_sock(sk);
+                               bh_unlock_sock(sk);
                                continue;
                        }
                }
 
                sk->sk_state = LLCP_CLOSED;
 
-               release_sock(sk);
+               bh_unlock_sock(sk);
 
                sock_orphan(sk);
 
@@ -114,9 +114,9 @@ static void local_release(struct kref *ref)
        nfc_llcp_socket_release(local, false);
        del_timer_sync(&local->link_timer);
        skb_queue_purge(&local->tx_queue);
-       destroy_workqueue(local->tx_wq);
-       destroy_workqueue(local->rx_wq);
-       destroy_workqueue(local->timeout_wq);
+       cancel_work_sync(&local->tx_work);
+       cancel_work_sync(&local->rx_work);
+       cancel_work_sync(&local->timeout_work);
        kfree_skb(local->rx_pending);
        kfree(local);
 }
@@ -181,7 +181,7 @@ static void nfc_llcp_symm_timer(unsigned long data)
 
        pr_err("SYMM timeout\n");
 
-       queue_work(local->timeout_wq, &local->timeout_work);
+       queue_work(system_nrt_wq, &local->timeout_work);
 }
 
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
@@ -426,6 +426,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        u8 *miux_tlv, miux_length;
        __be16 miux;
        u8 gb_len = 0;
+       int ret = 0;
 
        version = LLCP_VERSION_11;
        version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
@@ -450,8 +451,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        gb_len += ARRAY_SIZE(llcp_magic);
 
        if (gb_len > NFC_MAX_GT_LEN) {
-               kfree(version_tlv);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        gb_cur = local->gb;
@@ -471,12 +472,15 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        memcpy(gb_cur, miux_tlv, miux_length);
        gb_cur += miux_length;
 
+       local->gb_len = gb_len;
+
+out:
        kfree(version_tlv);
        kfree(lto_tlv);
+       kfree(wks_tlv);
+       kfree(miux_tlv);
 
-       local->gb_len = gb_len;
-
-       return 0;
+       return ret;
 }
 
 u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
@@ -554,6 +558,46 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
        sock->recv_ack_n = (sock->recv_n - 1) % 16;
 }
 
+void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
+                              struct sk_buff *skb, u8 direction)
+{
+       struct hlist_node *node;
+       struct sk_buff *skb_copy = NULL, *nskb;
+       struct sock *sk;
+       u8 *data;
+
+       read_lock(&local->raw_sockets.lock);
+
+       sk_for_each(sk, node, &local->raw_sockets.head) {
+               if (sk->sk_state != LLCP_BOUND)
+                       continue;
+
+               if (skb_copy == NULL) {
+                       skb_copy = __pskb_copy(skb, NFC_LLCP_RAW_HEADER_SIZE,
+                                              GFP_ATOMIC);
+
+                       if (skb_copy == NULL)
+                               continue;
+
+                       data = skb_push(skb_copy, NFC_LLCP_RAW_HEADER_SIZE);
+
+                       data[0] = local->dev ? local->dev->idx : 0xFF;
+                       data[1] = direction;
+               }
+
+               nskb = skb_clone(skb_copy, GFP_ATOMIC);
+               if (!nskb)
+                       continue;
+
+               if (sock_queue_rcv_skb(sk, nskb))
+                       kfree_skb(nskb);
+       }
+
+       read_unlock(&local->raw_sockets.lock);
+
+       kfree_skb(skb_copy);
+}
+
 static void nfc_llcp_tx_work(struct work_struct *work)
 {
        struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
@@ -574,6 +618,9 @@ static void nfc_llcp_tx_work(struct work_struct *work)
                                       DUMP_PREFIX_OFFSET, 16, 1,
                                       skb->data, skb->len, true);
 
+                       nfc_llcp_send_to_raw_sock(local, skb,
+                                                 NFC_LLCP_DIRECTION_TX);
+
                        ret = nfc_data_exchange(local->dev, local->target_idx,
                                                skb, nfc_llcp_recv, local);
 
@@ -1018,6 +1065,8 @@ static void nfc_llcp_rx_work(struct work_struct *work)
                print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
                               16, 1, skb->data, skb->len, true);
 
+       nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX);
+
        switch (ptype) {
        case LLCP_PDU_SYMM:
                pr_debug("SYMM\n");
@@ -1052,7 +1101,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
 
        }
 
-       queue_work(local->tx_wq, &local->tx_work);
+       queue_work(system_nrt_wq, &local->tx_work);
        kfree_skb(local->rx_pending);
        local->rx_pending = NULL;
 
@@ -1071,7 +1120,7 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
 
        local->rx_pending = skb_get(skb);
        del_timer(&local->link_timer);
-       queue_work(local->rx_wq, &local->rx_work);
+       queue_work(system_nrt_wq, &local->rx_work);
 
        return;
 }
@@ -1086,7 +1135,7 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
 
        local->rx_pending = skb_get(skb);
        del_timer(&local->link_timer);
-       queue_work(local->rx_wq, &local->rx_work);
+       queue_work(system_nrt_wq, &local->rx_work);
 
        return 0;
 }
@@ -1121,7 +1170,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
        if (rf_mode == NFC_RF_INITIATOR) {
                pr_debug("Queueing Tx work\n");
 
-               queue_work(local->tx_wq, &local->tx_work);
+               queue_work(system_nrt_wq, &local->tx_work);
        } else {
                mod_timer(&local->link_timer,
                          jiffies + msecs_to_jiffies(local->remote_lto));
@@ -1130,10 +1179,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
 
 int nfc_llcp_register_device(struct nfc_dev *ndev)
 {
-       struct device *dev = &ndev->dev;
        struct nfc_llcp_local *local;
-       char name[32];
-       int err;
 
        local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL);
        if (local == NULL)
@@ -1149,41 +1195,15 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 
        skb_queue_head_init(&local->tx_queue);
        INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
-       snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
-       local->tx_wq =
-               alloc_workqueue(name,
-                               WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                               1);
-       if (local->tx_wq == NULL) {
-               err = -ENOMEM;
-               goto err_local;
-       }
 
        local->rx_pending = NULL;
        INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
-       snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
-       local->rx_wq =
-               alloc_workqueue(name,
-                               WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                               1);
-       if (local->rx_wq == NULL) {
-               err = -ENOMEM;
-               goto err_tx_wq;
-       }
 
        INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
-       snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
-       local->timeout_wq =
-               alloc_workqueue(name,
-                               WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                               1);
-       if (local->timeout_wq == NULL) {
-               err = -ENOMEM;
-               goto err_rx_wq;
-       }
 
-       local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock);
-       local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock);
+       rwlock_init(&local->sockets.lock);
+       rwlock_init(&local->connecting_sockets.lock);
+       rwlock_init(&local->raw_sockets.lock);
 
        nfc_llcp_build_gb(local);
 
@@ -1192,17 +1212,6 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 
        list_add(&llcp_devices, &local->list);
 
-       return 0;
-
-err_rx_wq:
-       destroy_workqueue(local->rx_wq);
-
-err_tx_wq:
-       destroy_workqueue(local->tx_wq);
-
-err_local:
-       kfree(local);
-
        return 0;
 }
 
index 83b8bba5a2803d2cc4bafb05d7e553221434ae0e..fdb2d24e60bda5fe85d8225c38b25a1caed7c0de 100644 (file)
@@ -56,12 +56,9 @@ struct nfc_llcp_local {
 
        struct timer_list link_timer;
        struct sk_buff_head tx_queue;
-       struct workqueue_struct *tx_wq;
        struct work_struct       tx_work;
-       struct workqueue_struct *rx_wq;
        struct work_struct       rx_work;
        struct sk_buff *rx_pending;
-       struct workqueue_struct *timeout_wq;
        struct work_struct       timeout_work;
 
        u32 target_idx;
@@ -89,6 +86,7 @@ struct nfc_llcp_local {
        /* sockets array */
        struct llcp_sock_list sockets;
        struct llcp_sock_list connecting_sockets;
+       struct llcp_sock_list raw_sockets;
 };
 
 struct nfc_llcp_sock {
@@ -187,6 +185,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
 u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
 void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
 int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock);
+void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
+                              struct sk_buff *skb, u8 direction);
 
 /* Sock API */
 struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
index ddeb9aa398f0ced280fed93b853319923cf43c69..40f056debf9aaecd1fc7993a186a21230a111856 100644 (file)
@@ -142,6 +142,60 @@ error:
        return ret;
 }
 
+static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
+                             int alen)
+{
+       struct sock *sk = sock->sk;
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+       struct nfc_llcp_local *local;
+       struct nfc_dev *dev;
+       struct sockaddr_nfc_llcp llcp_addr;
+       int len, ret = 0;
+
+       if (!addr || addr->sa_family != AF_NFC)
+               return -EINVAL;
+
+       pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
+       memset(&llcp_addr, 0, sizeof(llcp_addr));
+       len = min_t(unsigned int, sizeof(llcp_addr), alen);
+       memcpy(&llcp_addr, addr, len);
+
+       lock_sock(sk);
+
+       if (sk->sk_state != LLCP_CLOSED) {
+               ret = -EBADFD;
+               goto error;
+       }
+
+       dev = nfc_get_device(llcp_addr.dev_idx);
+       if (dev == NULL) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL) {
+               ret = -ENODEV;
+               goto put_dev;
+       }
+
+       llcp_sock->dev = dev;
+       llcp_sock->local = nfc_llcp_local_get(local);
+       llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+
+       nfc_llcp_sock_link(&local->raw_sockets, sk);
+
+       sk->sk_state = LLCP_BOUND;
+
+put_dev:
+       nfc_put_device(dev);
+
+error:
+       release_sock(sk);
+       return ret;
+}
+
 static int llcp_sock_listen(struct socket *sock, int backlog)
 {
        struct sock *sk = sock->sk;
@@ -300,9 +354,6 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
        pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
                 llcp_sock->dsap, llcp_sock->ssap);
 
-       if (llcp_sock == NULL || llcp_sock->dev == NULL)
-               return -EBADFD;
-
        uaddr->sa_family = AF_NFC;
 
        *len = sizeof(struct sockaddr_nfc_llcp);
@@ -421,7 +472,10 @@ static int llcp_sock_release(struct socket *sock)
 
        release_sock(sk);
 
-       nfc_llcp_sock_unlink(&local->sockets, sk);
+       if (sock->type == SOCK_RAW)
+               nfc_llcp_sock_unlink(&local->raw_sockets, sk);
+       else
+               nfc_llcp_sock_unlink(&local->sockets, sk);
 
 out:
        sock_orphan(sk);
@@ -617,7 +671,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (!(flags & MSG_PEEK)) {
 
                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
-               if (sk->sk_type == SOCK_STREAM) {
+               if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_RAW) {
                        skb_pull(skb, copied);
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
@@ -658,6 +712,26 @@ static const struct proto_ops llcp_sock_ops = {
        .mmap           = sock_no_mmap,
 };
 
+static const struct proto_ops llcp_rawsock_ops = {
+       .family         = PF_NFC,
+       .owner          = THIS_MODULE,
+       .bind           = llcp_raw_sock_bind,
+       .connect        = sock_no_connect,
+       .release        = llcp_sock_release,
+       .socketpair     = sock_no_socketpair,
+       .accept         = sock_no_accept,
+       .getname        = llcp_sock_getname,
+       .poll           = llcp_sock_poll,
+       .ioctl          = sock_no_ioctl,
+       .listen         = sock_no_listen,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = sock_no_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .sendmsg        = sock_no_sendmsg,
+       .recvmsg        = llcp_sock_recvmsg,
+       .mmap           = sock_no_mmap,
+};
+
 static void llcp_sock_destruct(struct sock *sk)
 {
        struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -735,10 +809,15 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
 
        pr_debug("%p\n", sock);
 
-       if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM)
+       if (sock->type != SOCK_STREAM &&
+           sock->type != SOCK_DGRAM &&
+           sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sock->ops = &llcp_sock_ops;
+       if (sock->type == SOCK_RAW)
+               sock->ops = &llcp_rawsock_ops;
+       else
+               sock->ops = &llcp_sock_ops;
 
        sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
        if (sk == NULL)
index f81efe13985a71e2bd9c3bc16e0799a750ccee3d..acf9abb7d99badc592592f83b42f4acbb14ef4de 100644 (file)
@@ -176,6 +176,27 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
                     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
 }
 
+struct nci_set_config_param {
+       __u8    id;
+       size_t  len;
+       __u8    *val;
+};
+
+static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
+{
+       struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
+       struct nci_core_set_config_cmd cmd;
+
+       BUG_ON(param->len > NCI_MAX_PARAM_LEN);
+
+       cmd.num_params = 1;
+       cmd.param.id = param->id;
+       cmd.param.len = param->len;
+       memcpy(cmd.param.val, param->val, param->len);
+
+       nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
+}
+
 static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
 {
        struct nci_rf_disc_cmd cmd;
@@ -388,6 +409,32 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
        return nci_close_device(ndev);
 }
 
+static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
+{
+       struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+       struct nci_set_config_param param;
+       __u8 local_gb[NFC_MAX_GT_LEN];
+       int i, rc = 0;
+
+       param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
+       if ((param.val == NULL) || (param.len == 0))
+               return rc;
+
+       if (param.len > NCI_MAX_PARAM_LEN)
+               return -EINVAL;
+
+       for (i = 0; i < param.len; i++)
+               local_gb[param.len-1-i] = param.val[i];
+
+       param.id = NCI_PN_ATR_REQ_GEN_BYTES;
+       param.val = local_gb;
+
+       rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param,
+                        msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
+
+       return rc;
+}
+
 static int nci_start_poll(struct nfc_dev *nfc_dev,
                          __u32 im_protocols, __u32 tm_protocols)
 {
@@ -415,6 +462,14 @@ static int nci_start_poll(struct nfc_dev *nfc_dev,
                        return -EBUSY;
        }
 
+       if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+               rc = nci_set_local_general_bytes(nfc_dev);
+               if (rc) {
+                       pr_err("failed to set local general bytes\n");
+                       return rc;
+               }
+       }
+
        rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
                         msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
 
@@ -509,7 +564,7 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       pr_debug("target_idx %d\n", target->idx);
+       pr_debug("entry\n");
 
        if (!ndev->target_active_prot) {
                pr_err("unable to deactivate target, no active target\n");
@@ -524,6 +579,38 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
        }
 }
 
+
+static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
+                          __u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+       struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+       int rc;
+
+       pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);
+
+       rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
+       if (rc)
+               return rc;
+
+       rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
+                                         ndev->remote_gb_len);
+       if (!rc)
+               rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
+                                       NFC_RF_INITIATOR);
+
+       return rc;
+}
+
+static int nci_dep_link_down(struct nfc_dev *nfc_dev)
+{
+       pr_debug("entry\n");
+
+       nci_deactivate_target(nfc_dev, NULL);
+
+       return 0;
+}
+
+
 static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
                          struct sk_buff *skb,
                          data_exchange_cb_t cb, void *cb_context)
@@ -557,6 +644,8 @@ static struct nfc_ops nci_nfc_ops = {
        .dev_down = nci_dev_down,
        .start_poll = nci_start_poll,
        .stop_poll = nci_stop_poll,
+       .dep_link_up = nci_dep_link_up,
+       .dep_link_down = nci_dep_link_down,
        .activate_target = nci_activate_target,
        .deactivate_target = nci_deactivate_target,
        .im_transceive = nci_transceive,
index af7a93b04393a1ff3cbc088390f4972592dd3b1c..b2aa98ef0927cb5760f72fb40d016a0b1dcf5575 100644 (file)
@@ -176,6 +176,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
                        protocol = NFC_PROTO_ISO14443_B_MASK;
        else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
                protocol = NFC_PROTO_FELICA_MASK;
+       else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP)
+               protocol = NFC_PROTO_NFC_DEP_MASK;
        else
                protocol = 0;
 
@@ -361,6 +363,33 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        return NCI_STATUS_OK;
 }
 
+static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
+                       struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+{
+       struct activation_params_poll_nfc_dep *poll;
+       int i;
+
+       switch (ntf->activation_rf_tech_and_mode) {
+       case NCI_NFC_A_PASSIVE_POLL_MODE:
+       case NCI_NFC_F_PASSIVE_POLL_MODE:
+               poll = &ntf->activation_params.poll_nfc_dep;
+               poll->atr_res_len = min_t(__u8, *data++, 63);
+               pr_debug("atr_res_len %d\n", poll->atr_res_len);
+               if (poll->atr_res_len > 0) {
+                       for (i = 0; i < poll->atr_res_len; i++)
+                               poll->atr_res[poll->atr_res_len-1-i] = data[i];
+               }
+               break;
+
+       default:
+               pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+                      ntf->activation_rf_tech_and_mode);
+               return NCI_STATUS_RF_PROTOCOL_ERROR;
+       }
+
+       return NCI_STATUS_OK;
+}
+
 static void nci_target_auto_activated(struct nci_dev *ndev,
                                      struct nci_rf_intf_activated_ntf *ntf)
 {
@@ -454,6 +483,11 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
                                                                    &ntf, data);
                        break;
 
+               case NCI_RF_INTERFACE_NFC_DEP:
+                       err = nci_extract_activation_params_nfc_dep(ndev,
+                                                                   &ntf, data);
+                       break;
+
                case NCI_RF_INTERFACE_FRAME:
                        /* no activation params */
                        break;
@@ -473,6 +507,24 @@ exit:
 
                /* set the available credits to initial value */
                atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+
+               /* store general bytes to be reported later in dep_link_up */
+               if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) {
+                       ndev->remote_gb_len = 0;
+
+                       if (ntf.activation_params_len > 0) {
+                               /* ATR_RES general bytes at offset 15 */
+                               ndev->remote_gb_len = min_t(__u8,
+                                       (ntf.activation_params
+                                       .poll_nfc_dep.atr_res_len
+                                       - NFC_ATR_RES_GT_OFFSET),
+                                       NFC_MAX_GT_LEN);
+                               memcpy(ndev->remote_gb,
+                                      (ntf.activation_params.poll_nfc_dep
+                                      .atr_res + NFC_ATR_RES_GT_OFFSET),
+                                      ndev->remote_gb_len);
+                       }
+               }
        }
 
        if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
index 3003c3390e492c18907cc7f5076949439dd7273d..dd072f38ad00f5241d2cdc1299965d8bcc3966b9 100644 (file)
@@ -119,6 +119,16 @@ exit:
        nci_req_complete(ndev, rsp_1->status);
 }
 
+static void nci_core_set_config_rsp_packet(struct nci_dev *ndev,
+                                          struct sk_buff *skb)
+{
+       struct nci_core_set_config_rsp *rsp = (void *) skb->data;
+
+       pr_debug("status 0x%x\n", rsp->status);
+
+       nci_req_complete(ndev, rsp->status);
+}
+
 static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
                                       struct sk_buff *skb)
 {
@@ -194,6 +204,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_core_init_rsp_packet(ndev, skb);
                break;
 
+       case NCI_OP_CORE_SET_CONFIG_RSP:
+               nci_core_set_config_rsp_packet(ndev, skb);
+               break;
+
        case NCI_OP_RF_DISCOVER_MAP_RSP:
                nci_rf_disc_map_rsp_packet(ndev, skb);
                break;
index 4c51714ee74177509d6d7831c064e89280424ded..c1b5285cbde79fa6c861106649d57728373c4ba4 100644 (file)
@@ -58,7 +58,7 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
 {
        void *hdr;
 
-       hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
        if (!hdr)
                return -EMSGSIZE;
@@ -165,7 +165,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       dev->genl_data.poll_req_pid = 0;
+       dev->genl_data.poll_req_portid = 0;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!msg)
@@ -347,13 +347,13 @@ free_msg:
 }
 
 static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
-                               u32 pid, u32 seq,
+                               u32 portid, u32 seq,
                                struct netlink_callback *cb,
                                int flags)
 {
        void *hdr;
 
-       hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
+       hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags,
                          NFC_CMD_GET_DEVICE);
        if (!hdr)
                return -EMSGSIZE;
@@ -401,7 +401,7 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
        while (dev) {
                int rc;
 
-               rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
+               rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
                if (rc < 0)
                        break;
@@ -520,7 +520,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
                goto out_putdev;
        }
 
-       rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
+       rc = nfc_genl_send_device(msg, dev, info->snd_portid, info->snd_seq,
                                  NULL, 0);
        if (rc < 0)
                goto out_free;
@@ -611,7 +611,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
 
        rc = nfc_start_poll(dev, im_protocols, tm_protocols);
        if (!rc)
-               dev->genl_data.poll_req_pid = info->snd_pid;
+               dev->genl_data.poll_req_portid = info->snd_portid;
 
        mutex_unlock(&dev->genl_data.genl_data_mutex);
 
@@ -645,13 +645,13 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
 
        mutex_lock(&dev->genl_data.genl_data_mutex);
 
-       if (dev->genl_data.poll_req_pid != info->snd_pid) {
+       if (dev->genl_data.poll_req_portid != info->snd_portid) {
                rc = -EBUSY;
                goto out;
        }
 
        rc = nfc_stop_poll(dev);
-       dev->genl_data.poll_req_pid = 0;
+       dev->genl_data.poll_req_portid = 0;
 
 out:
        mutex_unlock(&dev->genl_data.genl_data_mutex);
@@ -761,38 +761,70 @@ static struct genl_ops nfc_genl_ops[] = {
        },
 };
 
-static int nfc_genl_rcv_nl_event(struct notifier_block *this,
-                                unsigned long event, void *ptr)
+
+struct urelease_work {
+       struct  work_struct w;
+       int     portid;
+};
+
+static void nfc_urelease_event_work(struct work_struct *work)
 {
-       struct netlink_notify *n = ptr;
+       struct urelease_work *w = container_of(work, struct urelease_work, w);
        struct class_dev_iter iter;
        struct nfc_dev *dev;
 
-       if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
-               goto out;
+       pr_debug("portid %d\n", w->portid);
 
-       pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
+       mutex_lock(&nfc_devlist_mutex);
 
        nfc_device_iter_init(&iter);
        dev = nfc_device_iter_next(&iter);
 
        while (dev) {
-               if (dev->genl_data.poll_req_pid == n->pid) {
+               mutex_lock(&dev->genl_data.genl_data_mutex);
+
+               if (dev->genl_data.poll_req_portid == w->portid) {
                        nfc_stop_poll(dev);
-                       dev->genl_data.poll_req_pid = 0;
+                       dev->genl_data.poll_req_portid = 0;
                }
+
+               mutex_unlock(&dev->genl_data.genl_data_mutex);
+
                dev = nfc_device_iter_next(&iter);
        }
 
        nfc_device_iter_exit(&iter);
 
+       mutex_unlock(&nfc_devlist_mutex);
+
+       kfree(w);
+}
+
+static int nfc_genl_rcv_nl_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct netlink_notify *n = ptr;
+       struct urelease_work *w;
+
+       if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
+               goto out;
+
+       pr_debug("NETLINK_URELEASE event from id %d\n", n->portid);
+
+       w = kmalloc(sizeof(*w), GFP_ATOMIC);
+       if (w) {
+               INIT_WORK((struct work_struct *) w, nfc_urelease_event_work);
+               w->portid = n->portid;
+               schedule_work((struct work_struct *) w);
+       }
+
 out:
        return NOTIFY_DONE;
 }
 
 void nfc_genl_data_init(struct nfc_genl_data *genl_data)
 {
-       genl_data->poll_req_pid = 0;
+       genl_data->poll_req_portid = 0;
        mutex_init(&genl_data->genl_data_mutex);
 }
 
index 954405ceae9ed5141293d3f47ce7c784aeee3c31..08114478cb853256c3e6f0aeb63d791887513cd8 100644 (file)
@@ -266,7 +266,7 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
        if (unlikely(!skb))
                return -ENOMEM;
 
-       vport = rcu_dereference(dp->ports[out_port]);
+       vport = ovs_vport_rcu(dp, out_port);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
@@ -286,7 +286,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = &OVS_CB(skb)->flow->key;
        upcall.userdata = NULL;
-       upcall.pid = 0;
+       upcall.portid = 0;
 
        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
@@ -296,7 +296,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                        break;
 
                case OVS_USERSPACE_ATTR_PID:
-                       upcall.pid = nla_get_u32(a);
+                       upcall.portid = nla_get_u32(a);
                        break;
                }
        }
index cf58cedad0833f9e9e704401fdecb5480c121caf..4c4b62ccc7d745bf9ba3298f17c2417b28c563b1 100644 (file)
 #include <linux/dmi.h>
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "datapath.h"
 #include "flow.h"
 #include "vport-internal_dev.h"
 
+/**
+ * struct ovs_net - Per net-namespace data for ovs.
+ * @dps: List of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+struct ovs_net {
+       struct list_head dps;
+};
+
+static int ovs_net_id __read_mostly;
+
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
 /**
  * DOC: Locking:
  *
  * each other.
  */
 
-/* Global list of datapaths to enable dumping them all out.
- * Protected by genl_mutex.
- */
-static LIST_HEAD(dps);
-
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
-
 static struct vport *new_vport(const struct vport_parms *);
-static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
-static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+static int queue_userspace_packet(struct net *, int dp_ifindex,
+                                 struct sk_buff *,
                                  const struct dp_upcall_info *);
 
 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
-static struct datapath *get_dp(int dp_ifindex)
+static struct datapath *get_dp(struct net *net, int dp_ifindex)
 {
        struct datapath *dp = NULL;
        struct net_device *dev;
 
        rcu_read_lock();
-       dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+       dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
@@ -107,7 +116,7 @@ static struct datapath *get_dp(int dp_ifindex)
 /* Must be called with rcu_read_lock or RTNL lock. */
 const char *ovs_dp_name(const struct datapath *dp)
 {
-       struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+       struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
 }
 
@@ -118,7 +127,7 @@ static int get_dpifindex(struct datapath *dp)
 
        rcu_read_lock();
 
-       local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+       local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
@@ -135,9 +144,31 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 
        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
+       release_net(ovs_dp_get_net(dp));
+       kfree(dp->ports);
        kfree(dp);
 }
 
+static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
+                                           u16 port_no)
+{
+       return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
+}
+
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
+{
+       struct vport *vport;
+       struct hlist_node *n;
+       struct hlist_head *head;
+
+       head = vport_hash_bucket(dp, port_no);
+       hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+               if (vport->port_no == port_no)
+                       return vport;
+       }
+       return NULL;
+}
+
 /* Called with RTNL lock and genl_lock. */
 static struct vport *new_vport(const struct vport_parms *parms)
 {
@@ -146,9 +177,9 @@ static struct vport *new_vport(const struct vport_parms *parms)
        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
+               struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
 
-               rcu_assign_pointer(dp->ports[parms->port_no], vport);
-               list_add(&vport->node, &dp->port_list);
+               hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
 
        return vport;
@@ -160,8 +191,7 @@ void ovs_dp_detach_port(struct vport *p)
        ASSERT_RTNL();
 
        /* First drop references to device. */
-       list_del(&p->node);
-       rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+       hlist_del_rcu(&p->dp_hash_node);
 
        /* Then destroy it. */
        ovs_vport_del(p);
@@ -195,7 +225,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
-               upcall.pid = p->upcall_pid;
+               upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
@@ -220,17 +250,18 @@ static struct genl_family dp_packet_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
-       .maxattr = OVS_PACKET_ATTR_MAX
+       .maxattr = OVS_PACKET_ATTR_MAX,
+       .netnsok = true
 };
 
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
-             const struct dp_upcall_info *upcall_info)
+                 const struct dp_upcall_info *upcall_info)
 {
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;
 
-       if (upcall_info->pid == 0) {
+       if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }
@@ -242,9 +273,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
        }
 
        if (!skb_is_gso(skb))
-               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+               err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
-               err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+               err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;
 
@@ -260,7 +291,8 @@ err:
        return err;
 }
 
-static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+static int queue_gso_packets(struct net *net, int dp_ifindex,
+                            struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
 {
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -276,7 +308,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
        /* Queue all of the segments. */
        skb = segs;
        do {
-               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+               err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;
 
@@ -306,7 +338,8 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
        return err;
 }
 
-static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+static int queue_userspace_packet(struct net *net, int dp_ifindex,
+                                 struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
 {
        struct ovs_header *upcall;
@@ -362,7 +395,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
 
        skb_copy_and_csum_dev(skb, nla_data(nla));
 
-       err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+       err = genlmsg_unicast(net, user_skb, upcall_info->portid);
 
 out:
        kfree_skb(nskb);
@@ -370,15 +403,10 @@ out:
 }
 
 /* Called with genl_mutex. */
-static int flush_flows(int dp_ifindex)
+static int flush_flows(struct datapath *dp)
 {
        struct flow_table *old_table;
        struct flow_table *new_table;
-       struct datapath *dp;
-
-       dp = get_dp(dp_ifindex);
-       if (!dp)
-               return -ENODEV;
 
        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
@@ -668,7 +696,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        packet->priority = flow->key.phy.priority;
 
        rcu_read_lock();
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;
@@ -742,7 +770,8 @@ static struct genl_family dp_flow_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
-       .maxattr = OVS_FLOW_ATTR_MAX
+       .maxattr = OVS_FLOW_ATTR_MAX,
+       .netnsok = true
 };
 
 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
@@ -751,7 +780,7 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
 
 /* Called with genl_lock. */
 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
-                                 struct sk_buff *skb, u32 pid,
+                                 struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
 {
        const int skb_orig_len = skb->len;
@@ -766,7 +795,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());
 
-       ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;
 
@@ -850,7 +879,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
 
 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
-                                              u32 pid, u32 seq, u8 cmd)
+                                              u32 portid, u32 seq, u8 cmd)
 {
        struct sk_buff *skb;
        int retval;
@@ -859,7 +888,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
+       retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
 }
@@ -894,7 +923,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                goto error;
        }
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;
@@ -941,7 +970,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                flow->hash = ovs_flow_hash(&key, key_len);
                ovs_flow_tbl_insert(table, flow);
 
-               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
@@ -979,7 +1008,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                        ovs_flow_deferred_free_acts(old_acts);
                }
 
-               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                               info->snd_seq, OVS_FLOW_CMD_NEW);
 
                /* Clear stats. */
@@ -991,11 +1020,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (!IS_ERR(reply))
-               genl_notify(reply, genl_info_net(info), info->snd_pid,
+               genl_notify(reply, genl_info_net(info), info->snd_portid,
                           ovs_dp_flow_multicast_group.id, info->nlhdr,
                           GFP_KERNEL);
        else
-               netlink_set_err(init_net.genl_sock, 0,
+               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;
 
@@ -1023,7 +1052,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;
 
@@ -1032,7 +1061,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (!flow)
                return -ENOENT;
 
-       reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+       reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);
@@ -1052,16 +1081,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        int err;
        int key_len;
 
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
        if (!a[OVS_FLOW_ATTR_KEY])
-               return flush_flows(ovs_header->dp_ifindex);
+               return flush_flows(dp);
+
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;
 
-       dp = get_dp(ovs_header->dp_ifindex);
-       if (!dp)
-               return -ENODEV;
-
        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
@@ -1073,13 +1103,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        ovs_flow_tbl_remove(table, flow);
 
-       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
+       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);
 
        ovs_flow_deferred_free(flow);
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
 }
@@ -1090,7 +1120,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct datapath *dp;
        struct flow_table *table;
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;
 
@@ -1107,7 +1137,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        break;
 
                if (ovs_flow_cmd_fill_info(flow, dp, skb,
-                                          NETLINK_CB(cb->skb).pid,
+                                          NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;
@@ -1152,7 +1182,8 @@ static struct genl_family dp_datapath_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
-       .maxattr = OVS_DP_ATTR_MAX
+       .maxattr = OVS_DP_ATTR_MAX,
+       .netnsok = true
 };
 
 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
@@ -1160,13 +1191,13 @@ static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
 };
 
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
-                               u32 pid, u32 seq, u32 flags, u8 cmd)
+                               u32 portid, u32 seq, u32 flags, u8 cmd)
 {
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        int err;
 
-       ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                   flags, cmd);
        if (!ovs_header)
                goto error;
@@ -1191,7 +1222,7 @@ error:
        return -EMSGSIZE;
 }
 
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
                                             u32 seq, u8 cmd)
 {
        struct sk_buff *skb;
@@ -1201,7 +1232,7 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+       retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
@@ -1210,18 +1241,19 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
 }
 
 /* Called with genl_mutex and optionally with RTNL lock also. */
-static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+static struct datapath *lookup_datapath(struct net *net,
+                                       struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
 {
        struct datapath *dp;
 
        if (!a[OVS_DP_ATTR_NAME])
-               dp = get_dp(ovs_header->dp_ifindex);
+               dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;
 
                rcu_read_lock();
-               vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+               vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
@@ -1235,22 +1267,21 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
-       int err;
+       struct ovs_net *ovs_net;
+       int err, i;
 
        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;
 
        rtnl_lock();
-       err = -ENODEV;
-       if (!try_module_get(THIS_MODULE))
-               goto err_unlock_rtnl;
 
        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
-               goto err_put_module;
-       INIT_LIST_HEAD(&dp->port_list);
+               goto err_unlock_rtnl;
+
+       ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
        /* Allocate table. */
        err = -ENOMEM;
@@ -1264,13 +1295,23 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto err_destroy_table;
        }
 
+       dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+                       GFP_KERNEL);
+       if (!dp->ports) {
+               err = -ENOMEM;
+               goto err_destroy_percpu;
+       }
+
+       for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+               INIT_HLIST_HEAD(&dp->ports[i]);
+
        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
-       parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+       parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
 
        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
@@ -1278,64 +1319,59 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                if (err == -EBUSY)
                        err = -EEXIST;
 
-               goto err_destroy_percpu;
+               goto err_destroy_ports_array;
        }
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_destroy_local_port;
 
-       list_add_tail(&dp->list_node, &dps);
+       ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
+       list_add_tail(&dp->list_node, &ovs_net->dps);
        rtnl_unlock();
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
        return 0;
 
 err_destroy_local_port:
-       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+       ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
+err_destroy_ports_array:
+       kfree(dp->ports);
 err_destroy_percpu:
        free_percpu(dp->stats_percpu);
 err_destroy_table:
        ovs_flow_tbl_destroy(genl_dereference(dp->table));
 err_free_dp:
+       release_net(ovs_dp_get_net(dp));
        kfree(dp);
-err_put_module:
-       module_put(THIS_MODULE);
 err_unlock_rtnl:
        rtnl_unlock();
 err:
        return err;
 }
 
-static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+/* Called with genl_mutex. */
+static void __dp_destroy(struct datapath *dp)
 {
-       struct vport *vport, *next_vport;
-       struct sk_buff *reply;
-       struct datapath *dp;
-       int err;
+       int i;
 
        rtnl_lock();
-       dp = lookup_datapath(info->userhdr, info->attrs);
-       err = PTR_ERR(dp);
-       if (IS_ERR(dp))
-               goto exit_unlock;
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
-                                     info->snd_seq, OVS_DP_CMD_DEL);
-       err = PTR_ERR(reply);
-       if (IS_ERR(reply))
-               goto exit_unlock;
+       for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+               struct vport *vport;
+               struct hlist_node *node, *n;
 
-       list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
-               if (vport->port_no != OVSP_LOCAL)
-                       ovs_dp_detach_port(vport);
+               hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+                       if (vport->port_no != OVSP_LOCAL)
+                               ovs_dp_detach_port(vport);
+       }
 
        list_del(&dp->list_node);
-       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+       ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
 
        /* rtnl_unlock() will wait until all the references to devices that
         * are pending unregistration have been dropped.  We do it here to
@@ -1345,17 +1381,32 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
        rtnl_unlock();
 
        call_rcu(&dp->rcu, destroy_dp_rcu);
-       module_put(THIS_MODULE);
+}
+
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *reply;
+       struct datapath *dp;
+       int err;
+
+       dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+       err = PTR_ERR(dp);
+       if (IS_ERR(dp))
+               return err;
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
+                                     info->snd_seq, OVS_DP_CMD_DEL);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               return err;
+
+       __dp_destroy(dp);
+
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
 
        return 0;
-
-exit_unlock:
-       rtnl_unlock();
-       return err;
 }
 
 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
@@ -1364,20 +1415,20 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        int err;
 
-       dp = lookup_datapath(info->userhdr, info->attrs);
+       dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
-               netlink_set_err(init_net.genl_sock, 0,
+               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_datapath_multicast_group.id, err);
                return 0;
        }
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
 
@@ -1389,11 +1440,11 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *reply;
        struct datapath *dp;
 
-       dp = lookup_datapath(info->userhdr, info->attrs);
+       dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);
@@ -1403,13 +1454,14 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 
 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;
 
-       list_for_each_entry(dp, &dps, list_node) {
+       list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
-                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
@@ -1459,7 +1511,8 @@ static struct genl_family dp_vport_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
-       .maxattr = OVS_VPORT_ATTR_MAX
+       .maxattr = OVS_VPORT_ATTR_MAX,
+       .netnsok = true
 };
 
 struct genl_multicast_group ovs_dp_vport_multicast_group = {
@@ -1468,13 +1521,13 @@ struct genl_multicast_group ovs_dp_vport_multicast_group = {
 
 /* Called with RTNL lock or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
-                                  u32 pid, u32 seq, u32 flags, u8 cmd)
+                                  u32 portid, u32 seq, u32 flags, u8 cmd)
 {
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;
 
-       ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;
@@ -1484,7 +1537,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
-           nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
+           nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
                goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
@@ -1506,7 +1559,7 @@ error:
 }
 
 /* Called with RTNL lock or RCU read lock. */
-struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
 {
        struct sk_buff *skb;
@@ -1516,7 +1569,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
+       retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
@@ -1525,14 +1578,15 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
 }
 
 /* Called with RTNL lock or RCU read lock. */
-static struct vport *lookup_vport(struct ovs_header *ovs_header,
+static struct vport *lookup_vport(struct net *net,
+                                 struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
 {
        struct datapath *dp;
        struct vport *vport;
 
        if (a[OVS_VPORT_ATTR_NAME]) {
-               vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+               vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
@@ -1545,11 +1599,11 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);
 
-               dp = get_dp(ovs_header->dp_ifindex);
+               dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);
 
-               vport = rcu_dereference_rtnl(dp->ports[port_no]);
+               vport = ovs_vport_rtnl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
@@ -1574,7 +1628,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto exit;
 
        rtnl_lock();
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;
@@ -1586,7 +1640,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;
 
-               vport = rtnl_dereference(dp->ports[port_no]);
+               vport = ovs_vport_rtnl_rcu(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
@@ -1596,7 +1650,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                                err = -EFBIG;
                                goto exit_unlock;
                        }
-                       vport = rtnl_dereference(dp->ports[port_no]);
+                       vport = ovs_vport_rtnl(dp, port_no);
                        if (!vport)
                                break;
                }
@@ -1607,21 +1661,21 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
-       parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+       parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
@@ -1638,7 +1692,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        rtnl_lock();
-       vport = lookup_vport(info->userhdr, a);
+       vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
@@ -1653,17 +1707,17 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        if (err)
                goto exit_unlock;
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
-               vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+               vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
-               netlink_set_err(init_net.genl_sock, 0,
+               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
                goto exit_unlock;
        }
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
@@ -1679,7 +1733,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        rtnl_lock();
-       vport = lookup_vport(info->userhdr, a);
+       vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
@@ -1689,7 +1743,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock;
        }
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
@@ -1697,7 +1751,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        ovs_dp_detach_port(vport);
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
@@ -1714,12 +1768,12 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        rcu_read_lock();
-       vport = lookup_vport(ovs_header, a);
+       vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
@@ -1738,54 +1792,39 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
-       u32 port_no;
-       int retval;
+       int bucket = cb->args[0], skip = cb->args[1];
+       int i, j = 0;
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;
 
        rcu_read_lock();
-       for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+       for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
-
-               vport = rcu_dereference(dp->ports[port_no]);
-               if (!vport)
-                       continue;
-
-               if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
-                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                           OVS_VPORT_CMD_NEW) < 0)
-                       break;
-       }
-       rcu_read_unlock();
-
-       cb->args[0] = port_no;
-       retval = skb->len;
-
-       return retval;
-}
-
-static void rehash_flow_table(struct work_struct *work)
-{
-       struct datapath *dp;
-
-       genl_lock();
-
-       list_for_each_entry(dp, &dps, list_node) {
-               struct flow_table *old_table = genl_dereference(dp->table);
-               struct flow_table *new_table;
-
-               new_table = ovs_flow_tbl_rehash(old_table);
-               if (!IS_ERR(new_table)) {
-                       rcu_assign_pointer(dp->table, new_table);
-                       ovs_flow_tbl_deferred_destroy(old_table);
+               struct hlist_node *n;
+
+               j = 0;
+               hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+                       if (j >= skip &&
+                           ovs_vport_cmd_fill_info(vport, skb,
+                                                   NETLINK_CB(cb->skb).portid,
+                                                   cb->nlh->nlmsg_seq,
+                                                   NLM_F_MULTI,
+                                                   OVS_VPORT_CMD_NEW) < 0)
+                               goto out;
+
+                       j++;
                }
+               skip = 0;
        }
+out:
+       rcu_read_unlock();
 
-       genl_unlock();
+       cb->args[0] = i;
+       cb->args[1] = j;
 
-       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+       return skb->len;
 }
 
 static struct genl_ops dp_vport_genl_ops[] = {
@@ -1872,6 +1911,59 @@ error:
        return err;
 }
 
+static void rehash_flow_table(struct work_struct *work)
+{
+       struct datapath *dp;
+       struct net *net;
+
+       genl_lock();
+       rtnl_lock();
+       for_each_net(net) {
+               struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+               list_for_each_entry(dp, &ovs_net->dps, list_node) {
+                       struct flow_table *old_table = genl_dereference(dp->table);
+                       struct flow_table *new_table;
+
+                       new_table = ovs_flow_tbl_rehash(old_table);
+                       if (!IS_ERR(new_table)) {
+                               rcu_assign_pointer(dp->table, new_table);
+                               ovs_flow_tbl_deferred_destroy(old_table);
+                       }
+               }
+       }
+       rtnl_unlock();
+       genl_unlock();
+
+       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
+static int __net_init ovs_init_net(struct net *net)
+{
+       struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+       INIT_LIST_HEAD(&ovs_net->dps);
+       return 0;
+}
+
+static void __net_exit ovs_exit_net(struct net *net)
+{
+       struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+       struct datapath *dp, *dp_next;
+
+       genl_lock();
+       list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+               __dp_destroy(dp);
+       genl_unlock();
+}
+
+static struct pernet_operations ovs_net_ops = {
+       .init = ovs_init_net,
+       .exit = ovs_exit_net,
+       .id   = &ovs_net_id,
+       .size = sizeof(struct ovs_net),
+};
+
 static int __init dp_init(void)
 {
        struct sk_buff *dummy_skb;
@@ -1889,10 +1981,14 @@ static int __init dp_init(void)
        if (err)
                goto error_flow_exit;
 
-       err = register_netdevice_notifier(&ovs_dp_device_notifier);
+       err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;
 
+       err = register_netdevice_notifier(&ovs_dp_device_notifier);
+       if (err)
+               goto error_netns_exit;
+
        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;
@@ -1903,6 +1999,8 @@ static int __init dp_init(void)
 
 error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_netns_exit:
+       unregister_pernet_device(&ovs_net_ops);
 error_vport_exit:
        ovs_vport_exit();
 error_flow_exit:
@@ -1914,9 +2012,10 @@ error:
 static void dp_cleanup(void)
 {
        cancel_delayed_work_sync(&rehash_flow_wq);
-       rcu_barrier();
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
+       unregister_pernet_device(&ovs_net_ops);
+       rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
 }
index c1105c147531001b65e8cdf75b0ccab2558bae0e..031dfbf37c937dff57657d33d45143d454cc4e46 100644 (file)
 #include <linux/u64_stats_sync.h>
 
 #include "flow.h"
+#include "vport.h"
 
-struct vport;
+#define DP_MAX_PORTS           USHRT_MAX
+#define DP_VPORT_HASH_BUCKETS  1024
 
-#define DP_MAX_PORTS 1024
 #define SAMPLE_ACTION_DEPTH 3
 
 /**
@@ -58,11 +59,10 @@ struct dp_stats_percpu {
  * @list_node: Element in global 'dps' list.
  * @n_flows: Number of flows currently in flow table.
  * @table: Current flow table.  Protected by genl_lock and RCU.
- * @ports: Map from port number to &struct vport.  %OVSP_LOCAL port
- * always exists, other ports may be %NULL.  Protected by RTNL and RCU.
- * @port_list: List of all ports in @ports in arbitrary order.  RTNL required
- * to iterate or modify.
+ * @ports: Hash table for ports.  %OVSP_LOCAL port always exists.  Protected by
+ * RTNL and RCU.
  * @stats_percpu: Per-CPU datapath statistics.
+ * @net: Reference to net namespace.
  *
  * Context: See the comment on locking at the top of datapath.c for additional
  * locking information.
@@ -75,13 +75,37 @@ struct datapath {
        struct flow_table __rcu *table;
 
        /* Switch ports. */
-       struct vport __rcu *ports[DP_MAX_PORTS];
-       struct list_head port_list;
+       struct hlist_head *ports;
 
        /* Stats. */
        struct dp_stats_percpu __percpu *stats_percpu;
+
+#ifdef CONFIG_NET_NS
+       /* Network namespace ref. */
+       struct net *net;
+#endif
 };
 
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);
+
+static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       return ovs_lookup_vport(dp, port_no);
+}
+
+static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
+       return ovs_lookup_vport(dp, port_no);
+}
+
+static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
+{
+       ASSERT_RTNL();
+       return ovs_lookup_vport(dp, port_no);
+}
+
 /**
  * struct ovs_skb_cb - OVS data in skb CB
  * @flow: The flow associated with this packet.  May be %NULL if no flow.
@@ -105,9 +129,19 @@ struct dp_upcall_info {
        u8 cmd;
        const struct sw_flow_key *key;
        const struct nlattr *userdata;
-       u32 pid;
+       u32 portid;
 };
 
+static inline struct net *ovs_dp_get_net(struct datapath *dp)
+{
+       return read_pnet(&dp->net);
+}
+
+static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
+{
+       write_pnet(&dp->net, net);
+}
+
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_multicast_group ovs_dp_vport_multicast_group;
 
index 36dcee8fc84a27dc29785e9a1adfecb8f021ee57..5558350e0d33ee4f9b26f8be13801a6139fb7aa8 100644 (file)
@@ -41,19 +41,21 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
        case NETDEV_UNREGISTER:
                if (!ovs_is_internal_dev(dev)) {
                        struct sk_buff *notify;
+                       struct datapath *dp = vport->dp;
 
                        notify = ovs_vport_cmd_build_info(vport, 0, 0,
                                                          OVS_VPORT_CMD_DEL);
                        ovs_dp_detach_port(vport);
                        if (IS_ERR(notify)) {
-                               netlink_set_err(init_net.genl_sock, 0,
+                               netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
                                                ovs_dp_vport_multicast_group.id,
                                                PTR_ERR(notify));
                                break;
                        }
 
-                       genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
-                                         GFP_KERNEL);
+                       genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
+                                               ovs_dp_vport_multicast_group.id,
+                                               GFP_KERNEL);
                }
                break;
        }
index b7f38b161909f0ea9ad8cf373ae77ab957cf5311..98c70630ad06178778dc4f1e04275fa6032facc8 100644 (file)
@@ -203,10 +203,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
        int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;
 
-       /* At least DP_MAX_PORTS actions are required to be able to flood a
-        * packet to every port.  Factor of 2 allows for setting VLAN tags,
-        * etc. */
-       if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+       if (actions_len > MAX_ACTIONS_BUFSIZE)
                return ERR_PTR(-EINVAL);
 
        sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
@@ -427,19 +424,11 @@ void ovs_flow_deferred_free(struct sw_flow *flow)
        call_rcu(&flow->rcu, rcu_free_flow_callback);
 }
 
-/* RCU callback used by ovs_flow_deferred_free_acts. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
-       struct sw_flow_actions *sf_acts = container_of(rcu,
-                       struct sw_flow_actions, rcu);
-       kfree(sf_acts);
-}
-
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
 void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
 {
-       call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+       kfree_rcu(sf_acts, rcu);
 }
 
 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
@@ -1000,7 +989,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                swkey->phy.in_port = in_port;
                attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
        } else {
-               swkey->phy.in_port = USHRT_MAX;
+               swkey->phy.in_port = DP_MAX_PORTS;
        }
 
        /* Data attributes. */
@@ -1143,7 +1132,7 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
        const struct nlattr *nla;
        int rem;
 
-       *in_port = USHRT_MAX;
+       *in_port = DP_MAX_PORTS;
        *priority = 0;
 
        nla_for_each_nested(nla, attr, rem) {
@@ -1180,7 +1169,7 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
            nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
                goto nla_put_failure;
 
-       if (swkey->phy.in_port != USHRT_MAX &&
+       if (swkey->phy.in_port != DP_MAX_PORTS &&
            nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
                goto nla_put_failure;
 
index c30df1a10c670ad01b7b8b88c49434c95c0c659e..14a324eb017b44cca9263701d094f3221a37c9a3 100644 (file)
@@ -43,7 +43,7 @@ struct sw_flow_actions {
 struct sw_flow_key {
        struct {
                u32     priority;       /* Packet QoS priority. */
-               u16     in_port;        /* Input switch port (or USHRT_MAX). */
+               u16     in_port;        /* Input switch port (or DP_MAX_PORTS). */
        } phy;
        struct {
                u8     src[ETH_ALEN];   /* Ethernet source address. */
@@ -163,6 +163,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
 int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
                               const struct nlattr *);
 
+#define MAX_ACTIONS_BUFSIZE    (16 * 1024)
 #define TBL_MIN_BUCKETS                1024
 
 struct flow_table {
index 4061b9ee07f7ca30af27ffa6f4264716dcda4e84..5d460c37df07ce70b615de1cff8e2191e358bab1 100644 (file)
@@ -144,7 +144,7 @@ static void do_setup(struct net_device *netdev)
        netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
-                               NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+                          NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
 
        netdev->vlan_features = netdev->features;
        netdev->features |= NETIF_F_HW_VLAN_TX;
@@ -175,9 +175,14 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
                goto error_free_vport;
        }
 
+       dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
        internal_dev = internal_dev_priv(netdev_vport->dev);
        internal_dev->vport = vport;
 
+       /* Restrict bridge port to current netns. */
+       if (vport->port_no == OVSP_LOCAL)
+               netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+
        err = register_netdevice(netdev_vport->dev);
        if (err)
                goto error_free_netdev;
index 6ea3551cc78c8f21a966aeaeffc04a575eeeb824..3c1e58ba714bf9534b597c9f28147cc7c3038b39 100644 (file)
@@ -83,7 +83,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
 
        netdev_vport = netdev_vport_priv(vport);
 
-       netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+       netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
        if (!netdev_vport->dev) {
                err = -ENODEV;
                goto error_free_vport;
index 6140336e79d7dd679603b7a1cb7dc750d3ed7f10..03779e8a262289f9a8178e62e0adf5afc16a9ae5 100644 (file)
  * 02110-1301, USA
  */
 
-#include <linux/dcache.h>
 #include <linux/etherdevice.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -27,7 +27,9 @@
 #include <linux/rcupdate.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
+#include <net/net_namespace.h>
 
+#include "datapath.h"
 #include "vport.h"
 #include "vport-internal_dev.h"
 
@@ -67,9 +69,9 @@ void ovs_vport_exit(void)
        kfree(dev_table);
 }
 
-static struct hlist_head *hash_bucket(const char *name)
+static struct hlist_head *hash_bucket(struct net *net, const char *name)
 {
-       unsigned int hash = full_name_hash(name, strlen(name));
+       unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
        return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 }
 
@@ -80,14 +82,15 @@ static struct hlist_head *hash_bucket(const char *name)
  *
  * Must be called with RTNL or RCU read lock.
  */
-struct vport *ovs_vport_locate(const char *name)
+struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
-       struct hlist_head *bucket = hash_bucket(name);
+       struct hlist_head *bucket = hash_bucket(net, name);
        struct vport *vport;
        struct hlist_node *node;
 
        hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
-               if (!strcmp(name, vport->ops->get_name(vport)))
+               if (!strcmp(name, vport->ops->get_name(vport)) &&
+                   net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;
 
        return NULL;
@@ -122,8 +125,9 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 
        vport->dp = parms->dp;
        vport->port_no = parms->port_no;
-       vport->upcall_pid = parms->upcall_pid;
+       vport->upcall_portid = parms->upcall_portid;
        vport->ops = ops;
+       INIT_HLIST_NODE(&vport->dp_hash_node);
 
        vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
        if (!vport->percpu_stats) {
@@ -170,14 +174,17 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
 
        for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
                if (vport_ops_list[i]->type == parms->type) {
+                       struct hlist_head *bucket;
+
                        vport = vport_ops_list[i]->create(parms);
                        if (IS_ERR(vport)) {
                                err = PTR_ERR(vport);
                                goto out;
                        }
 
-                       hlist_add_head_rcu(&vport->hash_node,
-                                          hash_bucket(vport->ops->get_name(vport)));
+                       bucket = hash_bucket(ovs_dp_get_net(vport->dp),
+                                            vport->ops->get_name(vport));
+                       hlist_add_head_rcu(&vport->hash_node, bucket);
                        return vport;
                }
        }
@@ -391,7 +398,7 @@ void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
        case VPORT_E_TX_ERROR:
                vport->err_stats.tx_errors++;
                break;
-       };
+       }
 
        spin_unlock(&vport->stats_lock);
 }
index aac680ca2b06410ec3300766583c3aaef78370ca..3f7961ea3c568d54011d6a570975fceb3c20d15f 100644 (file)
@@ -20,6 +20,7 @@
 #define VPORT_H 1
 
 #include <linux/list.h>
+#include <linux/netlink.h>
 #include <linux/openvswitch.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
@@ -38,7 +39,7 @@ void ovs_vport_exit(void);
 struct vport *ovs_vport_add(const struct vport_parms *);
 void ovs_vport_del(struct vport *);
 
-struct vport *ovs_vport_locate(const char *name);
+struct vport *ovs_vport_locate(struct net *net, const char *name);
 
 void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
 
@@ -69,10 +70,10 @@ struct vport_err_stats {
  * @rcu: RCU callback head for deferred destruction.
  * @port_no: Index into @dp's @ports array.
  * @dp: Datapath to which this port belongs.
- * @node: Element in @dp's @port_list.
- * @upcall_pid: The Netlink port to use for packets received on this port that
+ * @upcall_portid: The Netlink port to use for packets received on this port that
  * miss the flow table.
  * @hash_node: Element in @dev_table hash table in vport.c.
+ * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
  * @ops: Class structure.
  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
  * @stats_lock: Protects @err_stats;
@@ -82,10 +83,10 @@ struct vport {
        struct rcu_head rcu;
        u16 port_no;
        struct datapath *dp;
-       struct list_head node;
-       u32 upcall_pid;
+       u32 upcall_portid;
 
        struct hlist_node hash_node;
+       struct hlist_node dp_hash_node;
        const struct vport_ops *ops;
 
        struct vport_percpu_stats __percpu *percpu_stats;
@@ -112,7 +113,7 @@ struct vport_parms {
        /* For ovs_vport_alloc(). */
        struct datapath *dp;
        u16 port_no;
-       u32 upcall_pid;
+       u32 upcall_portid;
 };
 
 /**
index 0060e3b396b7b41fb0a5791966a39222cce8c04d..cc55b35f80e5acd045c37fc20bb37d98ba3d258c 100644 (file)
@@ -14,3 +14,11 @@ config PACKET
          be called af_packet.
 
          If unsure, say Y.
+
+config PACKET_DIAG
+       tristate "Packet: sockets monitoring interface"
+       depends on PACKET
+       default n
+       ---help---
+         Support for PF_PACKET sockets monitoring interface used by the ss tool.
+         If unsure, say Y.
index 81183eabfdec5cee58148d85df0877fc0e3a3d45..9df61347a3c3e98938c7b11a83195a22eb5d755c 100644 (file)
@@ -3,3 +3,5 @@
 #
 
 obj-$(CONFIG_PACKET) += af_packet.o
+obj-$(CONFIG_PACKET_DIAG) += af_packet_diag.o
+af_packet_diag-y += diag.o
index 048fba476aa5dc6d3ef8e8ff89aa5bc728756130..94060edbbd706ed11c7609913f1e434e5cec76c1 100644 (file)
@@ -93,6 +93,8 @@
 #include <net/inet_common.h>
 #endif
 
+#include "internal.h"
+
 /*
    Assumptions:
    - if device has no dev->hard_header routine, it adds and removes ll header
@@ -146,14 +148,6 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
 
 /* Private packet socket structures. */
 
-struct packet_mclist {
-       struct packet_mclist    *next;
-       int                     ifindex;
-       int                     count;
-       unsigned short          type;
-       unsigned short          alen;
-       unsigned char           addr[MAX_ADDR_LEN];
-};
 /* identical to struct packet_mreq except it has
  * a longer address field.
  */
@@ -175,63 +169,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 #define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 
-/* kbdq - kernel block descriptor queue */
-struct tpacket_kbdq_core {
-       struct pgv      *pkbdq;
-       unsigned int    feature_req_word;
-       unsigned int    hdrlen;
-       unsigned char   reset_pending_on_curr_blk;
-       unsigned char   delete_blk_timer;
-       unsigned short  kactive_blk_num;
-       unsigned short  blk_sizeof_priv;
-
-       /* last_kactive_blk_num:
-        * trick to see if user-space has caught up
-        * in order to avoid refreshing timer when every single pkt arrives.
-        */
-       unsigned short  last_kactive_blk_num;
-
-       char            *pkblk_start;
-       char            *pkblk_end;
-       int             kblk_size;
-       unsigned int    knum_blocks;
-       uint64_t        knxt_seq_num;
-       char            *prev;
-       char            *nxt_offset;
-       struct sk_buff  *skb;
-
-       atomic_t        blk_fill_in_prog;
-
-       /* Default is set to 8ms */
-#define DEFAULT_PRB_RETIRE_TOV (8)
-
-       unsigned short  retire_blk_tov;
-       unsigned short  version;
-       unsigned long   tov_in_jiffies;
-
-       /* timer to retire an outstanding block */
-       struct timer_list retire_blk_timer;
-};
-
 #define PGV_FROM_VMALLOC 1
-struct pgv {
-       char *buffer;
-};
-
-struct packet_ring_buffer {
-       struct pgv              *pg_vec;
-       unsigned int            head;
-       unsigned int            frames_per_block;
-       unsigned int            frame_size;
-       unsigned int            frame_max;
-
-       unsigned int            pg_vec_order;
-       unsigned int            pg_vec_pages;
-       unsigned int            pg_vec_len;
-
-       struct tpacket_kbdq_core        prb_bdqc;
-       atomic_t                pending;
-};
 
 #define BLOCK_STATUS(x)        ((x)->hdr.bh1.block_status)
 #define BLOCK_NUM_PKTS(x)      ((x)->hdr.bh1.num_pkts)
@@ -269,52 +207,6 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
 
-struct packet_fanout;
-struct packet_sock {
-       /* struct sock has to be the first member of packet_sock */
-       struct sock             sk;
-       struct packet_fanout    *fanout;
-       struct tpacket_stats    stats;
-       union  tpacket_stats_u  stats_u;
-       struct packet_ring_buffer       rx_ring;
-       struct packet_ring_buffer       tx_ring;
-       int                     copy_thresh;
-       spinlock_t              bind_lock;
-       struct mutex            pg_vec_lock;
-       unsigned int            running:1,      /* prot_hook is attached*/
-                               auxdata:1,
-                               origdev:1,
-                               has_vnet_hdr:1;
-       int                     ifindex;        /* bound device         */
-       __be16                  num;
-       struct packet_mclist    *mclist;
-       atomic_t                mapped;
-       enum tpacket_versions   tp_version;
-       unsigned int            tp_hdrlen;
-       unsigned int            tp_reserve;
-       unsigned int            tp_loss:1;
-       unsigned int            tp_tstamp;
-       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-};
-
-#define PACKET_FANOUT_MAX      256
-
-struct packet_fanout {
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
-       unsigned int            num_members;
-       u16                     id;
-       u8                      type;
-       u8                      defrag;
-       atomic_t                rr_cur;
-       struct list_head        list;
-       struct sock             *arr[PACKET_FANOUT_MAX];
-       spinlock_t              lock;
-       atomic_t                sk_ref;
-       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-};
-
 struct packet_skb_cb {
        unsigned int origlen;
        union {
@@ -334,11 +226,6 @@ struct packet_skb_cb {
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)
 
-static struct packet_sock *pkt_sk(struct sock *sk)
-{
-       return (struct packet_sock *)sk;
-}
-
 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
@@ -968,7 +855,8 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
                ppd->tp_status = TP_STATUS_VLAN_VALID;
        } else {
-               ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
+               ppd->hv1.tp_vlan_tci = 0;
+               ppd->tp_status = TP_STATUS_AVAILABLE;
        }
 }
 
@@ -1243,7 +1131,8 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
-static DEFINE_MUTEX(fanout_mutex);
+DEFINE_MUTEX(fanout_mutex);
+EXPORT_SYMBOL_GPL(fanout_mutex);
 static LIST_HEAD(fanout_list);
 
 static void __fanout_link(struct sock *sk, struct packet_sock *po)
@@ -1364,9 +1253,9 @@ static void fanout_release(struct sock *sk)
        if (!f)
                return;
 
+       mutex_lock(&fanout_mutex);
        po->fanout = NULL;
 
-       mutex_lock(&fanout_mutex);
        if (atomic_dec_and_test(&f->sk_ref)) {
                list_del(&f->list);
                dev_remove_pack(&f->prot_hook);
@@ -2063,7 +1952,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        int tp_len, size_max;
        unsigned char *addr;
        int len_sum = 0;
-       int status = 0;
+       int status = TP_STATUS_AVAILABLE;
        int hlen, tlen;
 
        mutex_lock(&po->pg_vec_lock);
@@ -2428,10 +2317,13 @@ static int packet_release(struct socket *sock)
        net = sock_net(sk);
        po = pkt_sk(sk);
 
-       spin_lock_bh(&net->packet.sklist_lock);
+       mutex_lock(&net->packet.sklist_lock);
        sk_del_node_init_rcu(sk);
+       mutex_unlock(&net->packet.sklist_lock);
+
+       preempt_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
-       spin_unlock_bh(&net->packet.sklist_lock);
+       preempt_enable();
 
        spin_lock(&po->bind_lock);
        unregister_prot_hook(sk, false);
@@ -2630,10 +2522,13 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
                register_prot_hook(sk);
        }
 
-       spin_lock_bh(&net->packet.sklist_lock);
+       mutex_lock(&net->packet.sklist_lock);
        sk_add_node_rcu(sk, &net->packet.sklist);
+       mutex_unlock(&net->packet.sklist_lock);
+
+       preempt_disable();
        sock_prot_inuse_add(net, &packet_proto, 1);
-       spin_unlock_bh(&net->packet.sklist_lock);
+       preempt_enable();
 
        return 0;
 out:
@@ -3886,7 +3781,7 @@ static const struct file_operations packet_seq_fops = {
 
 static int __net_init packet_net_init(struct net *net)
 {
-       spin_lock_init(&net->packet.sklist_lock);
+       mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);
 
        if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
diff --git a/net/packet/diag.c b/net/packet/diag.c
new file mode 100644 (file)
index 0000000..8db6e21
--- /dev/null
@@ -0,0 +1,242 @@
+#include <linux/module.h>
+#include <linux/sock_diag.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/packet_diag.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#include "internal.h"
+
+static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+       struct packet_diag_info pinfo;
+
+       pinfo.pdi_index = po->ifindex;
+       pinfo.pdi_version = po->tp_version;
+       pinfo.pdi_reserve = po->tp_reserve;
+       pinfo.pdi_copy_thresh = po->copy_thresh;
+       pinfo.pdi_tstamp = po->tp_tstamp;
+
+       pinfo.pdi_flags = 0;
+       if (po->running)
+               pinfo.pdi_flags |= PDI_RUNNING;
+       if (po->auxdata)
+               pinfo.pdi_flags |= PDI_AUXDATA;
+       if (po->origdev)
+               pinfo.pdi_flags |= PDI_ORIGDEV;
+       if (po->has_vnet_hdr)
+               pinfo.pdi_flags |= PDI_VNETHDR;
+       if (po->tp_loss)
+               pinfo.pdi_flags |= PDI_LOSS;
+
+       return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
+}
+
+static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+       struct nlattr *mca;
+       struct packet_mclist *ml;
+
+       mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
+       if (!mca)
+               return -EMSGSIZE;
+
+       rtnl_lock();
+       for (ml = po->mclist; ml; ml = ml->next) {
+               struct packet_diag_mclist *dml;
+
+               dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
+               if (!dml) {
+                       rtnl_unlock();
+                       nla_nest_cancel(nlskb, mca);
+                       return -EMSGSIZE;
+               }
+
+               dml->pdmc_index = ml->ifindex;
+               dml->pdmc_type = ml->type;
+               dml->pdmc_alen = ml->alen;
+               dml->pdmc_count = ml->count;
+               BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
+               memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
+       }
+
+       rtnl_unlock();
+       nla_nest_end(nlskb, mca);
+
+       return 0;
+}
+
+static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
+               struct sk_buff *nlskb)
+{
+       struct packet_diag_ring pdr;
+
+       if (!ring->pg_vec || ((ver > TPACKET_V2) &&
+                               (nl_type == PACKET_DIAG_TX_RING)))
+               return 0;
+
+       pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
+       pdr.pdr_block_nr = ring->pg_vec_len;
+       pdr.pdr_frame_size = ring->frame_size;
+       pdr.pdr_frame_nr = ring->frame_max + 1;
+
+       if (ver > TPACKET_V2) {
+               pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
+               pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
+               pdr.pdr_features = ring->prb_bdqc.feature_req_word;
+       } else {
+               pdr.pdr_retire_tmo = 0;
+               pdr.pdr_sizeof_priv = 0;
+               pdr.pdr_features = 0;
+       }
+
+       return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
+}
+
+static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
+{
+       int ret;
+
+       mutex_lock(&po->pg_vec_lock);
+       ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
+                       PACKET_DIAG_RX_RING, skb);
+       if (!ret)
+               ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
+                               PACKET_DIAG_TX_RING, skb);
+       mutex_unlock(&po->pg_vec_lock);
+
+       return ret;
+}
+
+static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
+{
+       int ret = 0;
+
+       mutex_lock(&fanout_mutex);
+       if (po->fanout) {
+               u32 val;
+
+               val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
+               ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
+       }
+       mutex_unlock(&fanout_mutex);
+
+       return ret;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct packet_diag_req *req,
+               u32 portid, u32 seq, u32 flags, int sk_ino)
+{
+       struct nlmsghdr *nlh;
+       struct packet_diag_msg *rp;
+       struct packet_sock *po = pkt_sk(sk);
+
+       nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       rp = nlmsg_data(nlh);
+       rp->pdiag_family = AF_PACKET;
+       rp->pdiag_type = sk->sk_type;
+       rp->pdiag_num = ntohs(po->num);
+       rp->pdiag_ino = sk_ino;
+       sock_diag_save_cookie(sk, rp->pdiag_cookie);
+
+       if ((req->pdiag_show & PACKET_SHOW_INFO) &&
+                       pdiag_put_info(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
+                       pdiag_put_mclist(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
+                       pdiag_put_rings_cfg(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
+                       pdiag_put_fanout(po, skb))
+               goto out_nlmsg_trim;
+
+       return nlmsg_end(skb, nlh);
+
+out_nlmsg_trim:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       int num = 0, s_num = cb->args[0];
+       struct packet_diag_req *req;
+       struct net *net;
+       struct sock *sk;
+       struct hlist_node *node;
+
+       net = sock_net(skb->sk);
+       req = nlmsg_data(cb->nlh);
+
+       mutex_lock(&net->packet.sklist_lock);
+       sk_for_each(sk, node, &net->packet.sklist) {
+               if (!net_eq(sock_net(sk), net))
+                       continue;
+               if (num < s_num)
+                       goto next;
+
+               if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).portid,
+                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                       sock_i_ino(sk)) < 0)
+                       goto done;
+next:
+               num++;
+       }
+done:
+       mutex_unlock(&net->packet.sklist_lock);
+       cb->args[0] = num;
+
+       return skb->len;
+}
+
+static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+       int hdrlen = sizeof(struct packet_diag_req);
+       struct net *net = sock_net(skb->sk);
+       struct packet_diag_req *req;
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       req = nlmsg_data(h);
+       /* Make it possible to support protocol filtering later */
+       if (req->sdiag_protocol)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = packet_diag_dump,
+               };
+               return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+       } else
+               return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler packet_diag_handler = {
+       .family = AF_PACKET,
+       .dump = packet_diag_handler_dump,
+};
+
+static int __init packet_diag_init(void)
+{
+       return sock_diag_register(&packet_diag_handler);
+}
+
+static void __exit packet_diag_exit(void)
+{
+       sock_diag_unregister(&packet_diag_handler);
+}
+
+module_init(packet_diag_init);
+module_exit(packet_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
diff --git a/net/packet/internal.h b/net/packet/internal.h
new file mode 100644 (file)
index 0000000..44945f6
--- /dev/null
@@ -0,0 +1,121 @@
+#ifndef __PACKET_INTERNAL_H__
+#define __PACKET_INTERNAL_H__
+
+/* One entry in a packet socket's multicast membership list, chained
+ * per bound network device.
+ */
+struct packet_mclist {
+       struct packet_mclist    *next;
+       int                     ifindex;
+       int                     count;
+       unsigned short          type;
+       unsigned short          alen;
+       unsigned char           addr[MAX_ADDR_LEN];
+};
+
+/* kbdq - kernel block descriptor queue */
+struct tpacket_kbdq_core {
+       struct pgv      *pkbdq;
+       unsigned int    feature_req_word;
+       unsigned int    hdrlen;
+       unsigned char   reset_pending_on_curr_blk;
+       unsigned char   delete_blk_timer;
+       unsigned short  kactive_blk_num;
+       unsigned short  blk_sizeof_priv;
+
+       /* last_kactive_blk_num:
+        * trick to see if user-space has caught up
+        * in order to avoid refreshing timer when every single pkt arrives.
+        */
+       unsigned short  last_kactive_blk_num;
+
+       char            *pkblk_start;
+       char            *pkblk_end;
+       int             kblk_size;
+       unsigned int    knum_blocks;
+       uint64_t        knxt_seq_num;
+       char            *prev;
+       char            *nxt_offset;
+       struct sk_buff  *skb;
+
+       atomic_t        blk_fill_in_prog;
+
+       /* Default is set to 8ms */
+#define DEFAULT_PRB_RETIRE_TOV (8)
+
+       unsigned short  retire_blk_tov;
+       unsigned short  version;
+       unsigned long   tov_in_jiffies;
+
+       /* timer to retire an outstanding block */
+       struct timer_list retire_blk_timer;
+};
+
+/* One page-vector chunk backing part of a ring buffer. */
+struct pgv {
+       char *buffer;
+};
+
+/* Memory-mapped RX or TX ring shared with user space (TPACKET). */
+struct packet_ring_buffer {
+       struct pgv              *pg_vec;
+       unsigned int            head;
+       unsigned int            frames_per_block;
+       unsigned int            frame_size;
+       unsigned int            frame_max;
+
+       unsigned int            pg_vec_order;
+       unsigned int            pg_vec_pages;
+       unsigned int            pg_vec_len;
+
+       /* Block descriptor queue state; only used for TPACKET_V3 rings
+        * (NOTE(review): presumably unused for V1/V2 — confirm in af_packet.c).
+        */
+       struct tpacket_kbdq_core        prb_bdqc;
+       atomic_t                pending;
+};
+
+extern struct mutex fanout_mutex;
+#define PACKET_FANOUT_MAX      256
+
+/* A fanout group: a set of packet sockets sharing one prot_hook,
+ * with incoming packets distributed among the member sockets.
+ */
+struct packet_fanout {
+#ifdef CONFIG_NET_NS
+       struct net              *net;
+#endif
+       unsigned int            num_members;
+       u16                     id;
+       u8                      type;
+       u8                      defrag;
+       atomic_t                rr_cur;
+       struct list_head        list;
+       struct sock             *arr[PACKET_FANOUT_MAX];
+       spinlock_t              lock;
+       atomic_t                sk_ref;
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+};
+
+/* Per-socket state for an AF_PACKET socket. */
+struct packet_sock {
+       /* struct sock has to be the first member of packet_sock */
+       struct sock             sk;
+       struct packet_fanout    *fanout;
+       struct tpacket_stats    stats;
+       union  tpacket_stats_u  stats_u;
+       struct packet_ring_buffer       rx_ring;
+       struct packet_ring_buffer       tx_ring;
+       int                     copy_thresh;
+       spinlock_t              bind_lock;
+       struct mutex            pg_vec_lock;
+       unsigned int            running:1,      /* prot_hook is attached*/
+                               auxdata:1,
+                               origdev:1,
+                               has_vnet_hdr:1;
+       int                     ifindex;        /* bound device         */
+       __be16                  num;
+       struct packet_mclist    *mclist;
+       atomic_t                mapped;
+       enum tpacket_versions   tp_version;
+       unsigned int            tp_hdrlen;
+       unsigned int            tp_reserve;
+       unsigned int            tp_loss:1;
+       unsigned int            tp_tstamp;
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+};
+
+/* Downcast a struct sock to its containing packet_sock; valid because
+ * struct sock is the first member (see comment in packet_sock).
+ * NOTE(review): static non-inline in a header can trigger
+ * unused-function warnings in TUs that include but don't call it —
+ * consider static inline.
+ */
+static struct packet_sock *pkt_sk(struct sock *sk)
+{
+       return (struct packet_sock *)sk;
+}
+
+#endif
index 7dd762a464e55f9ef7f41c83b433aa81b6f4d7cf..83a8389619aa7ccc0a490a8f8b664200bee53df9 100644 (file)
@@ -33,7 +33,7 @@
 /* Device address handling */
 
 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
-                    u32 pid, u32 seq, int event);
+                    u32 portid, u32 seq, int event);
 
 void phonet_address_notify(int event, struct net_device *dev, u8 addr)
 {
@@ -101,12 +101,12 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
 }
 
 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
-                       u32 pid, u32 seq, int event)
+                       u32 portid, u32 seq, int event)
 {
        struct ifaddrmsg *ifm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -148,7 +148,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
 
                        if (fill_addr(skb, pnd->netdev, addr << 2,
-                                        NETLINK_CB(cb->skb).pid,
+                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
                                goto out;
                }
@@ -165,12 +165,12 @@ out:
 /* Routes handling */
 
 static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
-                       u32 pid, u32 seq, int event)
+                       u32 portid, u32 seq, int event)
 {
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), 0);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -276,7 +276,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
                if (addr_idx++ < addr_start_idx)
                        continue;
-               if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).pid,
+               if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, RTM_NEWROUTE))
                        goto out;
        }
index af95c8e058fc0d45096234aa8aeb7c8da6fda175..a65ee78db0c54e1062186b9b132fe46fac380c91 100644 (file)
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        state_change(sk);
 }
 
index 72981375f47cc90cba0aaac80952bec3b0636bb4..7787537e9c2e95fffbcc8f89d594fc6de6ad52fe 100644 (file)
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 6243258f840f0e79dace450b97e3914e1badbacc..4fac4f2bb9dccd11f304aa6dd94f8852e91a5b53 100644 (file)
@@ -322,7 +322,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 1b4fd68f0c7c4db1f97859f407cded3a1e4f8144..81cf5a4c5e40c3c50b98c6694edd265a37e13b51 100644 (file)
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if
index 752b72360ebcb5736c849201aa6ed00361714ed9..a5c95274127990b34de5af72f82e9135cf73aa85 100644 (file)
@@ -150,6 +150,20 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
        rfkill_led_trigger_event(rfkill);
 }
 
+/* Return the name of the LED trigger associated with this rfkill
+ * device (the name registered with the LED trigger core).
+ */
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+       return rfkill->led_trigger.name;
+}
+EXPORT_SYMBOL(rfkill_get_led_trigger_name);
+
+/* Override the LED trigger name to use for this rfkill device.
+ * NOTE(review): only stores the pointer — must be called before the
+ * trigger is registered to take effect, and @name must outlive the
+ * rfkill device (no copy is made).
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+       BUG_ON(!rfkill);
+
+       rfkill->ledtrigname = name;
+}
+EXPORT_SYMBOL(rfkill_set_led_trigger_name);
+
 static int rfkill_led_trigger_register(struct rfkill *rfkill)
 {
        rfkill->led_trigger.name = rfkill->ledtrigname
@@ -256,6 +270,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
 static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 {
        unsigned long flags;
+       bool prev, curr;
        int err;
 
        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
@@ -270,6 +285,8 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
                rfkill->ops->query(rfkill, rfkill->data);
 
        spin_lock_irqsave(&rfkill->lock, flags);
+       prev = rfkill->state & RFKILL_BLOCK_SW;
+
        if (rfkill->state & RFKILL_BLOCK_SW)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
@@ -299,10 +316,13 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        }
        rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
+       curr = rfkill->state & RFKILL_BLOCK_SW;
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
        rfkill_led_trigger_event(rfkill);
-       rfkill_event(rfkill);
+
+       if (prev != curr)
+               rfkill_event(rfkill);
 }
 
 #ifdef CONFIG_RFKILL_INPUT
index e3d2c78cb52c9aebf735fa01c1712203216a920e..102761d294cbe7b470de479be5f07b7c5d4d69ed 100644 (file)
@@ -644,7 +644,7 @@ errout:
 }
 
 static int
-tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
+tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
             u16 flags, int event, int bind, int ref)
 {
        struct tcamsg *t;
@@ -652,7 +652,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        t = nlmsg_data(nlh);
@@ -678,7 +678,7 @@ out_nlmsg_trim:
 }
 
 static int
-act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
+act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
               struct tc_action *a, int event)
 {
        struct sk_buff *skb;
@@ -686,16 +686,16 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
-       if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
+       if (tca_get_fill(skb, a, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
 
-       return rtnl_unicast(skb, net, pid);
+       return rtnl_unicast(skb, net, portid);
 }
 
 static struct tc_action *
-tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
+tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
 {
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct tc_action *a;
@@ -762,7 +762,7 @@ static struct tc_action *create_a(int i)
 }
 
 static int tca_action_flush(struct net *net, struct nlattr *nla,
-                           struct nlmsghdr *n, u32 pid)
+                           struct nlmsghdr *n, u32 portid)
 {
        struct sk_buff *skb;
        unsigned char *b;
@@ -799,7 +799,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        if (a->ops == NULL)
                goto err_out;
 
-       nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
+       nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
        if (!nlh)
                goto out_module_put;
        t = nlmsg_data(nlh);
@@ -823,7 +823,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(a->ops->owner);
        kfree(a);
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+       err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                return 0;
@@ -841,7 +841,7 @@ noflush_out:
 
 static int
 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
-             u32 pid, int event)
+             u32 portid, int event)
 {
        int i, ret;
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
@@ -853,13 +853,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
        if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
                if (tb[1] != NULL)
-                       return tca_action_flush(net, tb[1], n, pid);
+                       return tca_action_flush(net, tb[1], n, portid);
                else
                        return -EINVAL;
        }
 
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
-               act = tcf_action_get_1(tb[i], n, pid);
+               act = tcf_action_get_1(tb[i], n, portid);
                if (IS_ERR(act)) {
                        ret = PTR_ERR(act);
                        goto err;
@@ -874,7 +874,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        }
 
        if (event == RTM_GETACTION)
-               ret = act_get_notify(net, pid, n, head, event);
+               ret = act_get_notify(net, portid, n, head, event);
        else { /* delete */
                struct sk_buff *skb;
 
@@ -884,7 +884,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                        goto err;
                }
 
-               if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
+               if (tca_get_fill(skb, head, portid, n->nlmsg_seq, 0, event,
                                 0, 1) <= 0) {
                        kfree_skb(skb);
                        ret = -EINVAL;
@@ -893,7 +893,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
                /* now do the delete */
                tcf_action_destroy(head, 0);
-               ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+               ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                     n->nlmsg_flags & NLM_F_ECHO);
                if (ret > 0)
                        return 0;
@@ -905,7 +905,7 @@ err:
 }
 
 static int tcf_add_notify(struct net *net, struct tc_action *a,
-                         u32 pid, u32 seq, int event, u16 flags)
+                         u32 portid, u32 seq, int event, u16 flags)
 {
        struct tcamsg *t;
        struct nlmsghdr *nlh;
@@ -920,7 +920,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
 
        b = skb_tail_pointer(skb);
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
        if (!nlh)
                goto out_kfree_skb;
        t = nlmsg_data(nlh);
@@ -940,7 +940,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
+       err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
@@ -953,7 +953,7 @@ out_kfree_skb:
 
 static int
 tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
-              u32 pid, int ovr)
+              u32 portid, int ovr)
 {
        int ret = 0;
        struct tc_action *act;
@@ -971,7 +971,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        /* dump then free all the actions after update; inserted policy
         * stays intact
         */
-       ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
+       ret = tcf_add_notify(net, act, portid, seq, RTM_NEWACTION, n->nlmsg_flags);
        for (a = act; a; a = act) {
                act = a->next;
                kfree(a);
@@ -984,7 +984,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_ACT_MAX + 1];
-       u32 pid = skb ? NETLINK_CB(skb).pid : 0;
+       u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = 0, ovr = 0;
 
        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
@@ -1008,17 +1008,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
 replay:
-               ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
+               ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
                if (ret == -EAGAIN)
                        goto replay;
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
-                                   pid, RTM_DELACTION);
+                                   portid, RTM_DELACTION);
                break;
        case RTM_GETACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
-                                   pid, RTM_GETACTION);
+                                   portid, RTM_GETACTION);
                break;
        default:
                BUG();
@@ -1085,7 +1085,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                goto out_module_put;
        }
 
-       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        cb->nlh->nlmsg_type, sizeof(*t), 0);
        if (!nlh)
                goto out_module_put;
@@ -1109,7 +1109,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                nla_nest_cancel(skb, nest);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-       if (NETLINK_CB(cb->skb).pid && ret)
+       if (NETLINK_CB(cb->skb).portid && ret)
                nlh->nlmsg_flags |= NLM_F_MULTI;
        module_put(a_o->owner);
        return skb->len;
index dc3ef5aef3559a4e4dd0f490230789e0e440e8f3..7ae02892437c25bf1e925d6dc756ed23aabff1a3 100644 (file)
@@ -343,13 +343,13 @@ errout:
 }
 
 static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
-                        unsigned long fh, u32 pid, u32 seq, u16 flags, int event)
+                        unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
        unsigned char *b = skb_tail_pointer(skb);
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
@@ -381,18 +381,18 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
                          unsigned long fh, int event)
 {
        struct sk_buff *skb;
-       u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
 
-       if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
+       if (tcf_fill_node(skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
 
-       return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+       return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
 }
 
@@ -407,7 +407,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
 {
        struct tcf_dump_args *a = (void *)arg;
 
-       return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
+       return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
 }
 
@@ -465,7 +465,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
-                       if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
+                       if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
                                break;
index 4ab6e33255736b90374d3dc53da60e79c277ebb9..7c3de6ffa5164db0f7abd3f0e2cc0ca2e92ecda9 100644 (file)
@@ -461,7 +461,7 @@ META_COLLECTOR(int_sk_sndtimeo)
 META_COLLECTOR(int_sk_sendmsg_off)
 {
        SKIP_NONLOCAL(skb);
-       dst->value = skb->sk->sk_sndmsg_off;
+       dst->value = skb->sk->sk_frag.offset;
 }
 
 META_COLLECTOR(int_sk_write_pend)
index a08b4ab3e421da67538f019cd6d7ec19f49fb12d..a18d975db59cea34eb0558490deb800f24c10d22 100644 (file)
@@ -1185,7 +1185,7 @@ graft:
 }
 
 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
-                        u32 pid, u32 seq, u16 flags, int event)
+                        u32 portid, u32 seq, u16 flags, int event)
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
@@ -1193,7 +1193,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        struct gnet_dump d;
        struct qdisc_size_table *stab;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
@@ -1248,25 +1248,25 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                        struct Qdisc *old, struct Qdisc *new)
 {
        struct sk_buff *skb;
-       u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
 
        if (old && !tc_qdisc_dump_ignore(old)) {
-               if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+               if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
                                  0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new && !tc_qdisc_dump_ignore(new)) {
-               if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+               if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
                                  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }
 
        if (skb->len)
-               return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+               return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                      n->nlmsg_flags & NLM_F_ECHO);
 
 err_out:
@@ -1289,7 +1289,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                q_idx++;
        } else {
                if (!tc_qdisc_dump_ignore(q) &&
-                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
@@ -1300,7 +1300,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                        continue;
                }
                if (!tc_qdisc_dump_ignore(q) &&
-                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
@@ -1375,7 +1375,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        const struct Qdisc_class_ops *cops;
        unsigned long cl = 0;
        unsigned long new_cl;
-       u32 pid = tcm->tcm_parent;
+       u32 portid = tcm->tcm_parent;
        u32 clid = tcm->tcm_handle;
        u32 qid = TC_H_MAJ(clid);
        int err;
@@ -1403,8 +1403,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        /* Step 1. Determine qdisc handle X:0 */
 
-       if (pid != TC_H_ROOT) {
-               u32 qid1 = TC_H_MAJ(pid);
+       if (portid != TC_H_ROOT) {
+               u32 qid1 = TC_H_MAJ(portid);
 
                if (qid && qid1) {
                        /* If both majors are known, they must be identical. */
@@ -1418,10 +1418,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                /* Now qid is genuine qdisc handle consistent
                 * both with parent and child.
                 *
-                * TC_H_MAJ(pid) still may be unspecified, complete it now.
+                * TC_H_MAJ(portid) still may be unspecified, complete it now.
                 */
-               if (pid)
-                       pid = TC_H_MAKE(qid, pid);
+               if (portid)
+                       portid = TC_H_MAKE(qid, portid);
        } else {
                if (qid == 0)
                        qid = dev->qdisc->handle;
@@ -1439,7 +1439,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        /* Now try to get class */
        if (clid == 0) {
-               if (pid == TC_H_ROOT)
+               if (portid == TC_H_ROOT)
                        clid = qid;
        } else
                clid = TC_H_MAKE(qid, clid);
@@ -1478,7 +1478,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        new_cl = cl;
        err = -EOPNOTSUPP;
        if (cops->change)
-               err = cops->change(q, clid, pid, tca, &new_cl);
+               err = cops->change(q, clid, portid, tca, &new_cl);
        if (err == 0)
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
 
@@ -1492,7 +1492,7 @@ out:
 
 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
-                         u32 pid, u32 seq, u16 flags, int event)
+                         u32 portid, u32 seq, u16 flags, int event)
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
@@ -1500,7 +1500,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        struct gnet_dump d;
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
@@ -1540,18 +1540,18 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
                         unsigned long cl, int event)
 {
        struct sk_buff *skb;
-       u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
 
-       if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
+       if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
 
-       return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+       return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
 }
 
@@ -1565,7 +1565,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
 {
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
 
-       return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
+       return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
 }
 
index 9ce0b4fe23ffcb95db43bd11f0f45fe1f4edf65f..71e50c80315fe00437461f59aee17a2b5b2b09b1 100644 (file)
@@ -352,7 +352,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
-       int err;
+       int err = 0;
 
        cl = drr_classify(skb, sch, &err);
        if (cl == NULL) {
index 511323e89cecb221f9650d9d35e13c20d5d54b29..aefc1504dc88f8b07963c390417f6dbf26544fa5 100644 (file)
@@ -324,24 +324,6 @@ void netif_carrier_off(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
-/**
- *     netif_notify_peers - notify network peers about existence of @dev
- *     @dev: network device
- *
- * Generate traffic such that interested network peers are aware of
- * @dev, such as by generating a gratuitous ARP. This may be used when
- * a device wants to inform the rest of the network about some sort of
- * reconfiguration such as a failover event or virtual machine
- * migration.
- */
-void netif_notify_peers(struct net_device *dev)
-{
-       rtnl_lock();
-       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
-       rtnl_unlock();
-}
-EXPORT_SYMBOL(netif_notify_peers);
-
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
@@ -545,6 +527,8 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
+static struct lock_class_key qdisc_tx_busylock;
+
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
 {
@@ -552,6 +536,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        struct Qdisc *sch;
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
+       struct net_device *dev = dev_queue->dev;
 
        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));
@@ -571,12 +556,16 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
+
        spin_lock_init(&sch->busylock);
+       lockdep_set_class(&sch->busylock,
+                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
-       dev_hold(qdisc_dev(sch));
+       dev_hold(dev);
        atomic_set(&sch->refcnt, 1);
 
        return sch;
index 211a212170451c41f352f087dc9e0b3af9e8ae2c..f0dd83cff90652dc870f5bc720f9ae1a404b8931 100644 (file)
@@ -881,7 +881,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
-       int err;
+       int err = 0;
 
        cl = qfq_classify(skb, sch, &err);
        if (cl == NULL) {
index ebaef3ed6065bee6d49880cde701ee49b26585e4..b1ef3bc301a5ad424fb041c0f6f37c010098bcc3 100644 (file)
@@ -82,6 +82,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
                                          sctp_scope_t scope,
                                          gfp_t gfp)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        int i;
        sctp_paramhdr_t *p;
@@ -124,7 +125,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * socket values.
         */
        asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
-       asoc->pf_retrans  = sctp_pf_retrans;
+       asoc->pf_retrans  = net->sctp.pf_retrans;
 
        asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
        asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -175,7 +176,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
+               min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -281,7 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * and will revert old behavior.
         */
        asoc->peer.asconf_capable = 0;
-       if (sctp_addip_noauth)
+       if (net->sctp.addip_noauth)
                asoc->peer.asconf_capable = 1;
        asoc->asconf_addr_del_pending = NULL;
        asoc->src_out_of_asoc_ok = 0;
@@ -641,6 +642,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                                           const gfp_t gfp,
                                           const int peer_state)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_transport *peer;
        struct sctp_sock *sp;
        unsigned short port;
@@ -674,7 +676,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                return peer;
        }
 
-       peer = sctp_transport_new(addr, gfp);
+       peer = sctp_transport_new(net, addr, gfp);
        if (!peer)
                return NULL;
 
@@ -1089,13 +1091,15 @@ out:
 
 /* Is this the association we are looking for? */
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
+                                          struct net *net,
                                           const union sctp_addr *laddr,
                                           const union sctp_addr *paddr)
 {
        struct sctp_transport *transport;
 
        if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
-           (htons(asoc->peer.port) == paddr->v4.sin_port)) {
+           (htons(asoc->peer.port) == paddr->v4.sin_port) &&
+           net_eq(sock_net(asoc->base.sk), net)) {
                transport = sctp_assoc_lookup_paddr(asoc, paddr);
                if (!transport)
                        goto out;
@@ -1116,6 +1120,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
        struct sctp_association *asoc =
                container_of(work, struct sctp_association,
                             base.inqueue.immediate);
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
@@ -1148,13 +1153,13 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                if (sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else
-                       SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+                       SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
 
                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
 
                /* Run through the state machine. */
-               error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
+               error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
                                   state, ep, asoc, chunk, GFP_ATOMIC);
 
                /* Check to see if the association is freed in response to
@@ -1414,6 +1419,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
 /* Should we send a SACK to update our peer? */
 static inline int sctp_peer_needs_update(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        switch (asoc->state) {
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1421,7 +1427,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
        case SCTP_STATE_SHUTDOWN_SENT:
                if ((asoc->rwnd > asoc->a_rwnd) &&
                    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
-                          (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
+                          (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
                           asoc->pathmtu)))
                        return 1;
                break;
@@ -1542,7 +1548,8 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
        if (asoc->peer.ipv6_address)
                flags |= SCTP_ADDR6_PEERSUPP;
 
-       return sctp_bind_addr_copy(&asoc->base.bind_addr,
+       return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+                                  &asoc->base.bind_addr,
                                   &asoc->ep->base.bind_addr,
                                   scope, gfp, flags);
 }
index bf812048cf6f7a244c547e0cd31a731351abfab3..159b9bc5d63300e53560cf6495f8f65b9fd06449 100644 (file)
@@ -392,13 +392,14 @@ nomem:
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_auth_bytes  *secret;
        struct sctp_shared_key *ep_key;
 
        /* If we don't support AUTH, or peer is not capable
         * we don't need to do anything.
         */
-       if (!sctp_auth_enable || !asoc->peer.auth_capable)
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
                return 0;
 
        /* If the key_id is non-zero and we couldn't find an
@@ -445,11 +446,12 @@ struct sctp_shared_key *sctp_auth_get_shkey(
  */
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct crypto_hash *tfm = NULL;
        __u16   id;
 
        /* if the transforms are already allocted, we are done */
-       if (!sctp_auth_enable) {
+       if (!net->sctp.auth_enable) {
                ep->auth_hmacs = NULL;
                return 0;
        }
@@ -674,7 +676,12 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
 /* Check if peer requested that this chunk is authenticated */
 int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-       if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable)
+       struct net  *net;
+       if (!asoc)
+               return 0;
+
+       net = sock_net(asoc->base.sk);
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
                return 0;
 
        return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
@@ -683,7 +690,12 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 /* Check if we requested that peer authenticate this chunk. */
 int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-       if (!sctp_auth_enable || !asoc)
+       struct net *net;
+       if (!asoc)
+               return 0;
+
+       net = sock_net(asoc->base.sk);
+       if (!net->sctp.auth_enable)
                return 0;
 
        return __sctp_auth_cid(chunk,
index 4ece451c8d27d59ba2a73fba1baf6cb8ca19fe4e..d886b3bf84f5a1823e208d3c784db05a6c88938a 100644 (file)
@@ -52,8 +52,8 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *,
-                             sctp_scope_t scope, gfp_t gfp,
+static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *,
+                             union sctp_addr *, sctp_scope_t scope, gfp_t gfp,
                              int flags);
 static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 
@@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 /* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
  * in 'src' which have a broader scope than 'scope'.
  */
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                        const struct sctp_bind_addr *src,
                        sctp_scope_t scope, gfp_t gfp,
                        int flags)
@@ -75,7 +75,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
 
        /* Extract the addresses which are relevant for this scope.  */
        list_for_each_entry(addr, &src->address_list, list) {
-               error = sctp_copy_one_addr(dest, &addr->a, scope,
+               error = sctp_copy_one_addr(net, dest, &addr->a, scope,
                                           gfp, flags);
                if (error < 0)
                        goto out;
@@ -87,7 +87,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
         */
        if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
                list_for_each_entry(addr, &src->address_list, list) {
-                       error = sctp_copy_one_addr(dest, &addr->a,
+                       error = sctp_copy_one_addr(net, dest, &addr->a,
                                                   SCTP_SCOPE_LINK, gfp,
                                                   flags);
                        if (error < 0)
@@ -448,7 +448,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr       *bp,
 }
 
 /* Copy out addresses from the global local address list. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
+static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
                              union sctp_addr *addr,
                              sctp_scope_t scope, gfp_t gfp,
                              int flags)
@@ -456,8 +456,8 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
        int error = 0;
 
        if (sctp_is_any(NULL, addr)) {
-               error = sctp_copy_local_addr_list(dest, scope, gfp, flags);
-       } else if (sctp_in_scope(addr, scope)) {
+               error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
+       } else if (sctp_in_scope(net, addr, scope)) {
                /* Now that the address is in scope, check to see if
                 * the address type is supported by local sock as
                 * well as the remote peer.
@@ -494,7 +494,7 @@ int sctp_is_any(struct sock *sk, const union sctp_addr *addr)
 }
 
 /* Is 'addr' valid for 'scope'?  */
-int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope)
 {
        sctp_scope_t addr_scope = sctp_scope(addr);
 
@@ -512,7 +512,7 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
         * Address scoping can be selectively controlled via sysctl
         * option
         */
-       switch (sctp_scope_policy) {
+       switch (net->sctp.scope_policy) {
        case SCTP_SCOPE_POLICY_DISABLE:
                return 1;
        case SCTP_SCOPE_POLICY_ENABLE:
index 6c8556459a751b3e2faa6b0442b804396ff6de7e..7c2df9c33df37a588c426e7945cd71233edd4577 100644 (file)
@@ -257,7 +257,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        offset = 0;
 
        if ((whole > 1) || (whole && over))
-               SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
+               SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
        /* Create chunks for all the full sized DATA chunks. */
        for (i=0, len=first_len; i < whole; i++) {
index 68a385d7c3bdaaab2ebed8c7f4bec94dc53d4c79..1859e2bc83d113d1a14d01f904475b01099e0626 100644 (file)
@@ -65,6 +65,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                                                struct sock *sk,
                                                gfp_t gfp)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmac_algo_param *auth_hmacs = NULL;
        struct sctp_chunks_param *auth_chunks = NULL;
        struct sctp_shared_key *null_key;
@@ -74,7 +75,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        if (!ep->digest)
                return NULL;
 
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                /* Allocate space for HMACS and CHUNKS authentication
                 * variables.  There are arrays that we encode directly
                 * into parameters to make the rest of the operations easier.
@@ -106,7 +107,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                /* If the Add-IP functionality is enabled, we must
                 * authenticate, ASCONF and ASCONF-ACK chunks
                 */
-               if (sctp_addip_enable) {
+               if (net->sctp.addip_enable) {
                        auth_chunks->chunks[0] = SCTP_CID_ASCONF;
                        auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
                        auth_chunks->param_hdr.length =
@@ -140,14 +141,14 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        INIT_LIST_HEAD(&ep->asocs);
 
        /* Use SCTP specific send buffer space queues.  */
-       ep->sndbuf_policy = sctp_sndbuf_policy;
+       ep->sndbuf_policy = net->sctp.sndbuf_policy;
 
        sk->sk_data_ready = sctp_data_ready;
        sk->sk_write_space = sctp_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
        /* Get the receive buffer policy for this endpoint */
-       ep->rcvbuf_policy = sctp_rcvbuf_policy;
+       ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
 
        /* Initialize the secret key used with cookie. */
        get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -302,11 +303,13 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 
 /* Is this the endpoint we are looking for?  */
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+                                              struct net *net,
                                               const union sctp_addr *laddr)
 {
        struct sctp_endpoint *retval = NULL;
 
-       if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
+       if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
+           net_eq(sock_net(ep->base.sk), net)) {
                if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
                                         sctp_sk(ep->base.sk)))
                        retval = ep;
@@ -343,7 +346,8 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 
        rport = ntohs(paddr->v4.sin_port);
 
-       hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+       hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
+                                rport);
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
@@ -386,13 +390,14 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 {
        struct sctp_sockaddr_entry *addr;
        struct sctp_bind_addr *bp;
+       struct net *net = sock_net(ep->base.sk);
 
        bp = &ep->base.bind_addr;
        /* This function is called with the socket lock held,
         * so the address_list can not change.
         */
        list_for_each_entry(addr, &bp->address_list, list) {
-               if (sctp_has_association(&addr->a, paddr))
+               if (sctp_has_association(net, &addr->a, paddr))
                        return 1;
        }
 
@@ -409,6 +414,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
                             base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
+       struct net *net;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
@@ -423,6 +429,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
        asoc = NULL;
        inqueue = &ep->base.inqueue;
        sk = ep->base.sk;
+       net = sock_net(sk);
 
        while (NULL != (chunk = sctp_inq_pop(inqueue))) {
                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
@@ -474,12 +481,12 @@ normal:
                if (asoc && sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else
-                       SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+                       SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
 
                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
 
-               error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
+               error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
                                   ep, asoc, chunk, GFP_ATOMIC);
 
                if (error && chunk)
index e64d5210ed130610402b261218360adc5295102c..25dfe7380479e9598e5732a753349e324aea463a 100644 (file)
 
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      const union sctp_addr *paddr,
                                      struct sctp_transport **transportp);
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+                                               const union sctp_addr *laddr);
 static struct sctp_association *__sctp_lookup_association(
+                                       struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt);
@@ -80,7 +83,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
-static inline int sctp_rcv_checksum(struct sk_buff *skb)
+static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 {
        struct sctphdr *sh = sctp_hdr(skb);
        __le32 cmp = sh->checksum;
@@ -96,7 +99,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 
        if (val != cmp) {
                /* CRC failure, dump it. */
-               SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
                return -1;
        }
        return 0;
@@ -129,11 +132,12 @@ int sctp_rcv(struct sk_buff *skb)
        union sctp_addr dest;
        int family;
        struct sctp_af *af;
+       struct net *net = dev_net(skb->dev);
 
        if (skb->pkt_type!=PACKET_HOST)
                goto discard_it;
 
-       SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
+       SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
 
        if (skb_linearize(skb))
                goto discard_it;
@@ -145,7 +149,7 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb->len < sizeof(struct sctphdr))
                goto discard_it;
        if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
-                 sctp_rcv_checksum(skb) < 0)
+                 sctp_rcv_checksum(net, skb) < 0)
                goto discard_it;
 
        skb_pull(skb, sizeof(struct sctphdr));
@@ -178,10 +182,10 @@ int sctp_rcv(struct sk_buff *skb)
            !af->addr_valid(&dest, NULL, skb))
                goto discard_it;
 
-       asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
+       asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 
        if (!asoc)
-               ep = __sctp_rcv_lookup_endpoint(&dest);
+               ep = __sctp_rcv_lookup_endpoint(net, &dest);
 
        /* Retrieve the common input handling substructure. */
        rcvr = asoc ? &asoc->base : &ep->base;
@@ -200,7 +204,7 @@ int sctp_rcv(struct sk_buff *skb)
                        sctp_endpoint_put(ep);
                        ep = NULL;
                }
-               sk = sctp_get_ctl_sock();
+               sk = net->sctp.ctl_sock;
                ep = sctp_sk(sk)->ep;
                sctp_endpoint_hold(ep);
                rcvr = &ep->base;
@@ -216,7 +220,7 @@ int sctp_rcv(struct sk_buff *skb)
         */
        if (!asoc) {
                if (sctp_rcv_ootb(skb)) {
-                       SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
+                       SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
                        goto discard_release;
                }
        }
@@ -272,9 +276,9 @@ int sctp_rcv(struct sk_buff *skb)
                        skb = NULL; /* sctp_chunk_free already freed the skb */
                        goto discard_release;
                }
-               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
        } else {
-               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
        }
 
@@ -289,7 +293,7 @@ int sctp_rcv(struct sk_buff *skb)
        return 0;
 
 discard_it:
-       SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
+       SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
        kfree_skb(skb);
        return 0;
 
@@ -462,11 +466,13 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
                }
                        
        } else {
+               struct net *net = sock_net(sk);
+
                if (timer_pending(&t->proto_unreach_timer) &&
                    del_timer(&t->proto_unreach_timer))
                        sctp_association_put(asoc);
 
-               sctp_do_sm(SCTP_EVENT_T_OTHER,
+               sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                           asoc->state, asoc->ep, asoc, t,
                           GFP_ATOMIC);
@@ -474,7 +480,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
-struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
                             struct sctphdr *sctphdr,
                             struct sctp_association **app,
                             struct sctp_transport **tpp)
@@ -503,7 +509,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
        /* Look for an association that matches the incoming ICMP error
         * packet.
         */
-       asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
+       asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
        if (!asoc)
                return NULL;
 
@@ -539,7 +545,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        *app = asoc;
        *tpp = transport;
@@ -586,9 +592,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        struct inet_sock *inet;
        sk_buff_data_t saveip, savesctp;
        int err;
+       struct net *net = dev_net(skb->dev);
 
        if (skb->len < ihlen + 8) {
-               ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
 
@@ -597,12 +604,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, ihlen);
-       sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+       sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original values. */
        skb->network_header = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        /* Warning:  The sock lock is held.  Remember to call
@@ -723,12 +730,13 @@ discard:
 /* Insert endpoint into the hash table.  */
 static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct sctp_ep_common *epb;
        struct sctp_hashbucket *head;
 
        epb = &ep->base;
 
-       epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+       epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
@@ -747,12 +755,13 @@ void sctp_hash_endpoint(struct sctp_endpoint *ep)
 /* Remove endpoint from the hash table.  */
 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
 
        epb = &ep->base;
 
-       epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+       epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
@@ -770,7 +779,8 @@ void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 }
 
 /* Look up an endpoint. */
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+                                               const union sctp_addr *laddr)
 {
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
@@ -778,16 +788,16 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
        struct hlist_node *node;
        int hash;
 
-       hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
+       hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
        head = &sctp_ep_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
                ep = sctp_ep(epb);
-               if (sctp_endpoint_is_match(ep, laddr))
+               if (sctp_endpoint_is_match(ep, net, laddr))
                        goto hit;
        }
 
-       ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+       ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
 hit:
        sctp_endpoint_hold(ep);
@@ -798,13 +808,15 @@ hit:
 /* Insert association into the hash table.  */
 static void __sctp_hash_established(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_ep_common *epb;
        struct sctp_hashbucket *head;
 
        epb = &asoc->base;
 
        /* Calculate which chain this entry will belong to. */
-       epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);
+       epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
+                                        asoc->peer.port);
 
        head = &sctp_assoc_hashtable[epb->hashent];
 
@@ -827,12 +839,13 @@ void sctp_hash_established(struct sctp_association *asoc)
 /* Remove association from the hash table.  */
 static void __sctp_unhash_established(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
 
        epb = &asoc->base;
 
-       epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
+       epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
                                         asoc->peer.port);
 
        head = &sctp_assoc_hashtable[epb->hashent];
@@ -855,6 +868,7 @@ void sctp_unhash_established(struct sctp_association *asoc)
 
 /* Look up an association. */
 static struct sctp_association *__sctp_lookup_association(
+                                       struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt)
@@ -869,12 +883,13 @@ static struct sctp_association *__sctp_lookup_association(
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
-       hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
+       hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
+                                ntohs(peer->v4.sin_port));
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
                asoc = sctp_assoc(epb);
-               transport = sctp_assoc_is_match(asoc, local, peer);
+               transport = sctp_assoc_is_match(asoc, net, local, peer);
                if (transport)
                        goto hit;
        }
@@ -892,27 +907,29 @@ hit:
 
 /* Look up an association. BH-safe. */
 SCTP_STATIC
-struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
+struct sctp_association *sctp_lookup_association(struct net *net,
+                                                const union sctp_addr *laddr,
                                                 const union sctp_addr *paddr,
                                            struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
        sctp_local_bh_disable();
-       asoc = __sctp_lookup_association(laddr, paddr, transportp);
+       asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
        sctp_local_bh_enable();
 
        return asoc;
 }
 
 /* Is there an association matching the given local and peer addresses? */
-int sctp_has_association(const union sctp_addr *laddr,
+int sctp_has_association(struct net *net,
+                        const union sctp_addr *laddr,
                         const union sctp_addr *paddr)
 {
        struct sctp_association *asoc;
        struct sctp_transport *transport;
 
-       if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
+       if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
                sctp_association_put(asoc);
                return 1;
        }
@@ -938,7 +955,8 @@ int sctp_has_association(const union sctp_addr *laddr,
  * in certain circumstances.
  *
  */
-static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
+       struct sk_buff *skb,
        const union sctp_addr *laddr, struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
@@ -978,7 +996,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
 
                af->from_addr_param(paddr, params.addr, sh->source, 0);
 
-               asoc = __sctp_lookup_association(laddr, paddr, &transport);
+               asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
                if (asoc)
                        return asoc;
        }
@@ -1001,6 +1019,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
  * subsequent ASCONF Chunks. If found, proceed to rule D4.
  */
 static struct sctp_association *__sctp_rcv_asconf_lookup(
+                                       struct net *net,
                                        sctp_chunkhdr_t *ch,
                                        const union sctp_addr *laddr,
                                        __be16 peer_port,
@@ -1020,7 +1039,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 
        af->from_addr_param(&paddr, param, peer_port, 0);
 
-       return __sctp_lookup_association(laddr, &paddr, transportp);
+       return __sctp_lookup_association(net, laddr, &paddr, transportp);
 }
 
 
@@ -1033,7 +1052,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
-static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
@@ -1074,8 +1094,9 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
                            break;
 
                    case SCTP_CID_ASCONF:
-                           if (have_auth || sctp_addip_noauth)
-                                   asoc = __sctp_rcv_asconf_lookup(ch, laddr,
+                           if (have_auth || net->sctp.addip_noauth)
+                                   asoc = __sctp_rcv_asconf_lookup(
+                                                       net, ch, laddr,
                                                        sctp_hdr(skb)->source,
                                                        transportp);
                    default:
@@ -1098,7 +1119,8 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
  * include looking inside of INIT/INIT-ACK chunks or after the AUTH
  * chunks.
  */
-static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
@@ -1118,11 +1140,11 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
        switch (ch->type) {
        case SCTP_CID_INIT:
        case SCTP_CID_INIT_ACK:
-               return __sctp_rcv_init_lookup(skb, laddr, transportp);
+               return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
                break;
 
        default:
-               return __sctp_rcv_walk_lookup(skb, laddr, transportp);
+               return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
                break;
        }
 
@@ -1131,21 +1153,22 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
 }
 
 /* Lookup an association for an inbound skb. */
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *paddr,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
-       asoc = __sctp_lookup_association(laddr, paddr, transportp);
+       asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 
        /* Further lookup for INIT/INIT-ACK packets.
         * SCTP Implementors Guide, 2.18 Handling of address
         * parameters within the INIT or INIT-ACK.
         */
        if (!asoc)
-               asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);
+               asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
 
        return asoc;
 }
index ed7139ea7978dc664f6dfbff33977cf31bbc4325..ea14cb44529528124e2bdd24988a59f1cacc2569 100644 (file)
@@ -99,6 +99,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct sctp_sockaddr_entry *addr = NULL;
        struct sctp_sockaddr_entry *temp;
+       struct net *net = dev_net(ifa->idev->dev);
        int found = 0;
 
        switch (ev) {
@@ -110,27 +111,27 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                        addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
-                       spin_lock_bh(&sctp_local_addr_lock);
-                       list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-                       sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-                       spin_unlock_bh(&sctp_local_addr_lock);
+                       spin_lock_bh(&net->sctp.local_addr_lock);
+                       list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+                       sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+                       spin_unlock_bh(&net->sctp.local_addr_lock);
                }
                break;
        case NETDEV_DOWN:
-               spin_lock_bh(&sctp_local_addr_lock);
+               spin_lock_bh(&net->sctp.local_addr_lock);
                list_for_each_entry_safe(addr, temp,
-                                       &sctp_local_addr_list, list) {
+                                       &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET6 &&
                                        ipv6_addr_equal(&addr->a.v6.sin6_addr,
                                                &ifa->addr)) {
-                               sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
                                list_del_rcu(&addr->list);
                                break;
                        }
                }
-               spin_unlock_bh(&sctp_local_addr_lock);
+               spin_unlock_bh(&net->sctp.local_addr_lock);
                if (found)
                        kfree_rcu(addr, rcu);
                break;
@@ -154,6 +155,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ipv6_pinfo *np;
        sk_buff_data_t saveip, savesctp;
        int err;
+       struct net *net = dev_net(skb->dev);
 
        idev = in6_dev_get(skb->dev);
 
@@ -162,12 +164,12 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, offset);
-       sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+       sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original pointers. */
        skb->network_header   = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP6_INC_STATS_BH(dev_net(skb->dev), idev, ICMP6_MIB_INERRORS);
+               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
                goto out;
        }
 
@@ -241,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
                          __func__, skb, skb->len,
                          &fl6.saddr, &fl6.daddr);
 
-       SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+       SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
                skb->local_df = 1;
@@ -580,7 +582,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (!(type & IPV6_ADDR_UNICAST))
                return 0;
 
-       return ipv6_chk_addr(&init_net, in6, NULL, 0);
+       return ipv6_chk_addr(sock_net(&sp->inet.sk), in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -857,14 +859,14 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                struct net_device *dev;
 
                if (type & IPV6_ADDR_LINKLOCAL) {
+                       struct net *net;
                        if (!addr->v6.sin6_scope_id)
                                return 0;
+                       net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
-                       dev = dev_get_by_index_rcu(&init_net,
-                                                  addr->v6.sin6_scope_id);
+                       dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
                        if (!dev ||
-                           !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
-                                          dev, 0)) {
+                           !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
                                rcu_read_unlock();
                                return 0;
                        }
@@ -897,7 +899,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        if (!addr->v6.sin6_scope_id)
                                return 0;
                        rcu_read_lock();
-                       dev = dev_get_by_index_rcu(&init_net,
+                       dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
                                                   addr->v6.sin6_scope_id);
                        rcu_read_unlock();
                        if (!dev)
index 8ef8e7d9eb61bbf74b6c023f4e33ee360085f84c..fe012c44f8dff15e4882165e2718b11f919cf260 100644 (file)
@@ -129,20 +129,20 @@ static const struct file_operations sctp_objcnt_ops = {
 };
 
 /* Initialize the objcount in the proc filesystem.  */
-void sctp_dbg_objcnt_init(void)
+void sctp_dbg_objcnt_init(struct net *net)
 {
        struct proc_dir_entry *ent;
 
        ent = proc_create("sctp_dbg_objcnt", 0,
-                         proc_net_sctp, &sctp_objcnt_ops);
+                         net->sctp.proc_net_sctp, &sctp_objcnt_ops);
        if (!ent)
                pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
 }
 
 /* Cleanup the objcount entry in the proc filesystem.  */
-void sctp_dbg_objcnt_exit(void)
+void sctp_dbg_objcnt_exit(struct net *net)
 {
-       remove_proc_entry("sctp_dbg_objcnt", proc_net_sctp);
+       remove_proc_entry("sctp_dbg_objcnt", net->sctp.proc_net_sctp);
 }
 
 
index be50aa234dcdea30a5c7986eaeaae3570f64a6e3..4e90188bf4895b95f89cdcd9ca0f024c31bbe08a 100644 (file)
@@ -616,7 +616,7 @@ out:
        return err;
 no_route:
        kfree_skb(nskb);
-       IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+       IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
        /* FIXME: Returning the 'err' will effect all the associations
         * associated with a socket, although only one of the paths of the
index e7aa177c9522a232c1f1b58c6e5a40df2db03a29..d16632e1503a56a4c592157936ab568a2be3a3a4 100644 (file)
@@ -299,6 +299,7 @@ void sctp_outq_free(struct sctp_outq *q)
 /* Put a new chunk in an sctp_outq.  */
 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 {
+       struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
        SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
@@ -337,15 +338,15 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 
                        sctp_outq_tail_data(q, chunk);
                        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-                               SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
                        else
-                               SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
                        q->empty = 0;
                        break;
                }
        } else {
                list_add_tail(&chunk->list, &q->control_chunk_list);
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
        if (error < 0)
@@ -478,11 +479,12 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                     sctp_retransmit_reason_t reason)
 {
+       struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
        switch(reason) {
        case SCTP_RTXR_T3_RTX:
-               SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                /* Update the retran path if the T3-rtx timer has expired for
                 * the current retran path.
@@ -493,15 +495,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                        transport->asoc->unack_data;
                break;
        case SCTP_RTXR_FAST_RTX:
-               SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                q->fast_rtx = 1;
                break;
        case SCTP_RTXR_PMTUD:
-               SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
                break;
        case SCTP_RTXR_T1_RTX:
-               SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
                transport->asoc->init_retries++;
                break;
        default:
@@ -589,9 +591,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                 * next chunk.
                 */
                if (chunk->tsn_gap_acked) {
-                       list_del(&chunk->transmitted_list);
-                       list_add_tail(&chunk->transmitted_list,
-                                       &transport->transmitted);
+                       list_move_tail(&chunk->transmitted_list,
+                                      &transport->transmitted);
                        continue;
                }
 
@@ -655,9 +656,8 @@ redo:
                        /* The append was successful, so add this chunk to
                         * the transmitted list.
                         */
-                       list_del(&chunk->transmitted_list);
-                       list_add_tail(&chunk->transmitted_list,
-                                       &transport->transmitted);
+                       list_move_tail(&chunk->transmitted_list,
+                                      &transport->transmitted);
 
                        /* Mark the chunk as ineligible for fast retransmit
                         * after it is retransmitted.
@@ -1914,6 +1914,6 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 
        if (ftsn_chunk) {
                list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
        }
 }
index 534c7eae9d15f9b0371b7cd2963f74e7cbf18e7c..794bb14decdea60ec58e68a59bc5a6b2feda4a01 100644 (file)
@@ -57,7 +57,7 @@
 
 #define DECLARE_PRIMITIVE(name) \
 /* This is called in the code as sctp_primitive_ ## name.  */ \
-int sctp_primitive_ ## name(struct sctp_association *asoc, \
+int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
                            void *arg) { \
        int error = 0; \
        sctp_event_t event_type; sctp_subtype_t subtype; \
@@ -69,7 +69,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
        state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
        ep = asoc ? asoc->ep : NULL; \
        \
-       error = sctp_do_sm(event_type, subtype, state, ep, asoc, \
+       error = sctp_do_sm(net, event_type, subtype, state, ep, asoc,   \
                           arg, GFP_KERNEL); \
        return error; \
 }
index dc12febc977a2958f62f729ed434230d867e05a4..c3bea269faf4e6228143f70c6422046842922177 100644 (file)
@@ -80,11 +80,12 @@ static const struct snmp_mib sctp_snmp_list[] = {
 /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
 static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 {
+       struct net *net = seq->private;
        int i;
 
        for (i = 0; sctp_snmp_list[i].name != NULL; i++)
                seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                          snmp_fold_field((void __percpu **)sctp_statistics,
+                          snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
                                      sctp_snmp_list[i].entry));
 
        return 0;
@@ -93,7 +94,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 /* Initialize the seq file operations for 'snmp' object. */
 static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, sctp_snmp_seq_show, NULL);
+       return single_open_net(inode, file, sctp_snmp_seq_show);
 }
 
 static const struct file_operations sctp_snmp_seq_fops = {
@@ -105,11 +106,12 @@ static const struct file_operations sctp_snmp_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'snmp' object. */
-int __init sctp_snmp_proc_init(void)
+int __net_init sctp_snmp_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops);
+       p = proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_snmp_seq_fops);
        if (!p)
                return -ENOMEM;
 
@@ -117,9 +119,9 @@ int __init sctp_snmp_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'snmp' object. */
-void sctp_snmp_proc_exit(void)
+void sctp_snmp_proc_exit(struct net *net)
 {
-       remove_proc_entry("snmp", proc_net_sctp);
+       remove_proc_entry("snmp", net->sctp.proc_net_sctp);
 }
 
 /* Dump local addresses of an association/endpoint. */
@@ -213,6 +215,8 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
        sctp_for_each_hentry(epb, node, &head->chain) {
                ep = sctp_ep(epb);
                sk = epb->sk;
+               if (!net_eq(sock_net(sk), seq_file_net(seq)))
+                       continue;
                seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
                           sctp_sk(sk)->type, sk->sk_state, hash,
                           epb->bind_addr.port,
@@ -239,7 +243,8 @@ static const struct seq_operations sctp_eps_ops = {
 /* Initialize the seq file operations for 'eps' object. */
 static int sctp_eps_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_eps_ops);
+       return seq_open_net(inode, file, &sctp_eps_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_eps_seq_fops = {
@@ -250,11 +255,12 @@ static const struct file_operations sctp_eps_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'eps' object. */
-int __init sctp_eps_proc_init(void)
+int __net_init sctp_eps_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
+       p = proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_eps_seq_fops);
        if (!p)
                return -ENOMEM;
 
@@ -262,9 +268,9 @@ int __init sctp_eps_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'eps' object. */
-void sctp_eps_proc_exit(void)
+void sctp_eps_proc_exit(struct net *net)
 {
-       remove_proc_entry("eps", proc_net_sctp);
+       remove_proc_entry("eps", net->sctp.proc_net_sctp);
 }
 
 
@@ -317,6 +323,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        sctp_for_each_hentry(epb, node, &head->chain) {
                assoc = sctp_assoc(epb);
                sk = epb->sk;
+               if (!net_eq(sock_net(sk), seq_file_net(seq)))
+                       continue;
                seq_printf(seq,
                           "%8pK %8pK %-3d %-3d %-2d %-4d "
                           "%4d %8d %8d %7d %5lu %-5d %5d ",
@@ -356,7 +364,8 @@ static const struct seq_operations sctp_assoc_ops = {
 /* Initialize the seq file operations for 'assocs' object. */
 static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_assoc_ops);
+       return seq_open_net(inode, file, &sctp_assoc_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_assocs_seq_fops = {
@@ -367,11 +376,11 @@ static const struct file_operations sctp_assocs_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'assocs' object. */
-int __init sctp_assocs_proc_init(void)
+int __net_init sctp_assocs_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("assocs", S_IRUGO, proc_net_sctp,
+       p = proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp,
                        &sctp_assocs_seq_fops);
        if (!p)
                return -ENOMEM;
@@ -380,9 +389,9 @@ int __init sctp_assocs_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'assocs' object. */
-void sctp_assocs_proc_exit(void)
+void sctp_assocs_proc_exit(struct net *net)
 {
-       remove_proc_entry("assocs", proc_net_sctp);
+       remove_proc_entry("assocs", net->sctp.proc_net_sctp);
 }
 
 static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
@@ -428,6 +437,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        sctp_local_bh_disable();
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
+               if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
+                       continue;
                assoc = sctp_assoc(epb);
                list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
                                        transports) {
@@ -491,14 +502,15 @@ static const struct seq_operations sctp_remaddr_ops = {
 };
 
 /* Cleanup the proc fs entry for 'remaddr' object. */
-void sctp_remaddr_proc_exit(void)
+void sctp_remaddr_proc_exit(struct net *net)
 {
-       remove_proc_entry("remaddr", proc_net_sctp);
+       remove_proc_entry("remaddr", net->sctp.proc_net_sctp);
 }
 
 static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_remaddr_ops);
+       return seq_open_net(inode, file, &sctp_remaddr_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_remaddr_seq_fops = {
@@ -508,11 +520,12 @@ static const struct file_operations sctp_remaddr_seq_fops = {
        .release = seq_release,
 };
 
-int __init sctp_remaddr_proc_init(void)
+int __net_init sctp_remaddr_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
+       p = proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_remaddr_seq_fops);
        if (!p)
                return -ENOMEM;
        return 0;
index 1f89c4e696457fc02948066052713cac4d2f46fc..2d518425d5984bf954c6ebba3d7db0abb7ef5dc7 100644 (file)
 
 /* Global data structures. */
 struct sctp_globals sctp_globals __read_mostly;
-DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
-
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry  *proc_net_sctp;
-#endif
 
 struct idr sctp_assocs_id;
 DEFINE_SPINLOCK(sctp_assocs_id_lock);
 
-/* This is the global socket data structure used for responding to
- * the Out-of-the-blue (OOTB) packets.  A control sock will be created
- * for this socket at the initialization time.
- */
-static struct sock *sctp_ctl_sock;
-
 static struct sctp_pf *sctp_pf_inet6_specific;
 static struct sctp_pf *sctp_pf_inet_specific;
 static struct sctp_af *sctp_af_v4_specific;
@@ -96,74 +85,54 @@ long sysctl_sctp_mem[3];
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];
 
-/* Return the address of the control sock. */
-struct sock *sctp_get_ctl_sock(void)
-{
-       return sctp_ctl_sock;
-}
-
 /* Set up the proc fs entry for the SCTP protocol. */
-static __init int sctp_proc_init(void)
+static __net_init int sctp_proc_init(struct net *net)
 {
-       if (percpu_counter_init(&sctp_sockets_allocated, 0))
-               goto out_nomem;
 #ifdef CONFIG_PROC_FS
-       if (!proc_net_sctp) {
-               proc_net_sctp = proc_mkdir("sctp", init_net.proc_net);
-               if (!proc_net_sctp)
-                       goto out_free_percpu;
-       }
-
-       if (sctp_snmp_proc_init())
+       net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
+       if (!net->sctp.proc_net_sctp)
+               goto out_proc_net_sctp;
+       if (sctp_snmp_proc_init(net))
                goto out_snmp_proc_init;
-       if (sctp_eps_proc_init())
+       if (sctp_eps_proc_init(net))
                goto out_eps_proc_init;
-       if (sctp_assocs_proc_init())
+       if (sctp_assocs_proc_init(net))
                goto out_assocs_proc_init;
-       if (sctp_remaddr_proc_init())
+       if (sctp_remaddr_proc_init(net))
                goto out_remaddr_proc_init;
 
        return 0;
 
 out_remaddr_proc_init:
-       sctp_assocs_proc_exit();
+       sctp_assocs_proc_exit(net);
 out_assocs_proc_init:
-       sctp_eps_proc_exit();
+       sctp_eps_proc_exit(net);
 out_eps_proc_init:
-       sctp_snmp_proc_exit();
+       sctp_snmp_proc_exit(net);
 out_snmp_proc_init:
-       if (proc_net_sctp) {
-               proc_net_sctp = NULL;
-               remove_proc_entry("sctp", init_net.proc_net);
-       }
-out_free_percpu:
-       percpu_counter_destroy(&sctp_sockets_allocated);
-#else
-       return 0;
-#endif /* CONFIG_PROC_FS */
-
-out_nomem:
+       remove_proc_entry("sctp", net->proc_net);
+       net->sctp.proc_net_sctp = NULL;
+out_proc_net_sctp:
        return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+       return 0;
 }
 
 /* Clean up the proc fs entry for the SCTP protocol.
  * Note: Do not make this __exit as it is used in the init error
  * path.
  */
-static void sctp_proc_exit(void)
+static void sctp_proc_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       sctp_snmp_proc_exit();
-       sctp_eps_proc_exit();
-       sctp_assocs_proc_exit();
-       sctp_remaddr_proc_exit();
-
-       if (proc_net_sctp) {
-               proc_net_sctp = NULL;
-               remove_proc_entry("sctp", init_net.proc_net);
-       }
+       sctp_snmp_proc_exit(net);
+       sctp_eps_proc_exit(net);
+       sctp_assocs_proc_exit(net);
+       sctp_remaddr_proc_exit(net);
+
+       remove_proc_entry("sctp", net->proc_net);
+       net->sctp.proc_net_sctp = NULL;
 #endif
-       percpu_counter_destroy(&sctp_sockets_allocated);
 }
 
 /* Private helper to extract ipv4 address and stash them in
@@ -201,29 +170,29 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 /* Extract our IP addresses from the system and stash them in the
  * protocol structure.
  */
-static void sctp_get_local_addr_list(void)
+static void sctp_get_local_addr_list(struct net *net)
 {
        struct net_device *dev;
        struct list_head *pos;
        struct sctp_af *af;
 
        rcu_read_lock();
-       for_each_netdev_rcu(&init_net, dev) {
+       for_each_netdev_rcu(net, dev) {
                __list_for_each(pos, &sctp_address_families) {
                        af = list_entry(pos, struct sctp_af, list);
-                       af->copy_addrlist(&sctp_local_addr_list, dev);
+                       af->copy_addrlist(&net->sctp.local_addr_list, dev);
                }
        }
        rcu_read_unlock();
 }
 
 /* Free the existing local addresses.  */
-static void sctp_free_local_addr_list(void)
+static void sctp_free_local_addr_list(struct net *net)
 {
        struct sctp_sockaddr_entry *addr;
        struct list_head *pos, *temp;
 
-       list_for_each_safe(pos, temp, &sctp_local_addr_list) {
+       list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
                list_del(pos);
                kfree(addr);
@@ -231,17 +200,17 @@ static void sctp_free_local_addr_list(void)
 }
 
 /* Copy the local addresses which are valid for 'scope' into 'bp'.  */
-int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
-                             gfp_t gfp, int copy_flags)
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
+                             sctp_scope_t scope, gfp_t gfp, int copy_flags)
 {
        struct sctp_sockaddr_entry *addr;
        int error = 0;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+       list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
-               if (sctp_in_scope(&addr->a, scope)) {
+               if (sctp_in_scope(net, &addr->a, scope)) {
                        /* Now that the address is in scope, check to see if
                         * the address type is really supported by the local
                         * sock as well as the remote peer.
@@ -397,7 +366,8 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
 /* Should this be available for binding?   */
 static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
 {
-       int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr);
+       struct net *net = sock_net(&sp->inet.sk);
+       int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
 
 
        if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
@@ -484,7 +454,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
                          __func__, &fl4->daddr, &fl4->saddr);
 
-       rt = ip_route_output_key(&init_net, fl4);
+       rt = ip_route_output_key(sock_net(sk), fl4);
        if (!IS_ERR(rt))
                dst = &rt->dst;
 
@@ -530,7 +500,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                    (AF_INET == laddr->a.sa.sa_family)) {
                        fl4->saddr = laddr->a.v4.sin_addr.s_addr;
                        fl4->fl4_sport = laddr->a.v4.sin_port;
-                       rt = ip_route_output_key(&init_net, fl4);
+                       rt = ip_route_output_key(sock_net(sk), fl4);
                        if (!IS_ERR(rt)) {
                                dst = &rt->dst;
                                goto out_unlock;
@@ -627,14 +597,15 @@ static void sctp_v4_ecn_capable(struct sock *sk)
 
 void sctp_addr_wq_timeout_handler(unsigned long arg)
 {
+       struct net *net = (struct net *)arg;
        struct sctp_sockaddr_entry *addrw, *temp;
        struct sctp_sock *sp;
 
-       spin_lock_bh(&sctp_addr_wq_lock);
+       spin_lock_bh(&net->sctp.addr_wq_lock);
 
-       list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
                SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
-                   " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+                   " for cmd %d at entry %p\n", &net->sctp.addr_waitq, &addrw->a, addrw->state,
                    addrw);
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -648,7 +619,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                goto free_next;
 
                        in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
-                       if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+                       if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
                            addrw->state == SCTP_ADDR_NEW) {
                                unsigned long timeo_val;
 
@@ -656,12 +627,12 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                    SCTP_ADDRESS_TICK_DELAY);
                                timeo_val = jiffies;
                                timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-                               mod_timer(&sctp_addr_wq_timer, timeo_val);
+                               mod_timer(&net->sctp.addr_wq_timer, timeo_val);
                                break;
                        }
                }
 #endif
-               list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+               list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
                        struct sock *sk;
 
                        sk = sctp_opt2sk(sp);
@@ -679,31 +650,32 @@ free_next:
                list_del(&addrw->list);
                kfree(addrw);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
-static void sctp_free_addr_wq(void)
+static void sctp_free_addr_wq(struct net *net)
 {
        struct sctp_sockaddr_entry *addrw;
        struct sctp_sockaddr_entry *temp;
 
-       spin_lock_bh(&sctp_addr_wq_lock);
-       del_timer(&sctp_addr_wq_timer);
-       list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+       spin_lock_bh(&net->sctp.addr_wq_lock);
+       del_timer(&net->sctp.addr_wq_timer);
+       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
                list_del(&addrw->list);
                kfree(addrw);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* lookup the entry for the same address in the addr_waitq
  * sctp_addr_wq MUST be locked
  */
-static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
+                                       struct sctp_sockaddr_entry *addr)
 {
        struct sctp_sockaddr_entry *addrw;
 
-       list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+       list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
                if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
                        continue;
                if (addrw->a.sa.sa_family == AF_INET) {
@@ -719,7 +691,7 @@ static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entr
        return NULL;
 }
 
-void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
 {
        struct sctp_sockaddr_entry *addrw;
        unsigned long timeo_val;
@@ -730,38 +702,38 @@ void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
         * new address after a couple of addition and deletion of that address
         */
 
-       spin_lock_bh(&sctp_addr_wq_lock);
+       spin_lock_bh(&net->sctp.addr_wq_lock);
        /* Offsets existing events in addr_wq */
-       addrw = sctp_addr_wq_lookup(addr);
+       addrw = sctp_addr_wq_lookup(net, addr);
        if (addrw) {
                if (addrw->state != cmd) {
                        SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
                            " in wq %p\n", addrw->state, &addrw->a,
-                           &sctp_addr_waitq);
+                           &net->sctp.addr_waitq);
                        list_del(&addrw->list);
                        kfree(addrw);
                }
-               spin_unlock_bh(&sctp_addr_wq_lock);
+               spin_unlock_bh(&net->sctp.addr_wq_lock);
                return;
        }
 
        /* OK, we have to add the new address to the wait queue */
        addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
        if (addrw == NULL) {
-               spin_unlock_bh(&sctp_addr_wq_lock);
+               spin_unlock_bh(&net->sctp.addr_wq_lock);
                return;
        }
        addrw->state = cmd;
-       list_add_tail(&addrw->list, &sctp_addr_waitq);
+       list_add_tail(&addrw->list, &net->sctp.addr_waitq);
        SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
-           " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+           " in wq %p\n", addrw->state, &addrw->a, &net->sctp.addr_waitq);
 
-       if (!timer_pending(&sctp_addr_wq_timer)) {
+       if (!timer_pending(&net->sctp.addr_wq_timer)) {
                timeo_val = jiffies;
                timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-               mod_timer(&sctp_addr_wq_timer, timeo_val);
+               mod_timer(&net->sctp.addr_wq_timer, timeo_val);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* Event handler for inet address addition/deletion events.
@@ -776,11 +748,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
        struct sctp_sockaddr_entry *addr = NULL;
        struct sctp_sockaddr_entry *temp;
+       struct net *net = dev_net(ifa->ifa_dev->dev);
        int found = 0;
 
-       if (!net_eq(dev_net(ifa->ifa_dev->dev), &init_net))
-               return NOTIFY_DONE;
-
        switch (ev) {
        case NETDEV_UP:
                addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
@@ -789,27 +759,27 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                        addr->a.v4.sin_port = 0;
                        addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
                        addr->valid = 1;
-                       spin_lock_bh(&sctp_local_addr_lock);
-                       list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-                       sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-                       spin_unlock_bh(&sctp_local_addr_lock);
+                       spin_lock_bh(&net->sctp.local_addr_lock);
+                       list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+                       sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+                       spin_unlock_bh(&net->sctp.local_addr_lock);
                }
                break;
        case NETDEV_DOWN:
-               spin_lock_bh(&sctp_local_addr_lock);
+               spin_lock_bh(&net->sctp.local_addr_lock);
                list_for_each_entry_safe(addr, temp,
-                                       &sctp_local_addr_list, list) {
+                                       &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET &&
                                        addr->a.v4.sin_addr.s_addr ==
                                        ifa->ifa_local) {
-                               sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
                                list_del_rcu(&addr->list);
                                break;
                        }
                }
-               spin_unlock_bh(&sctp_local_addr_lock);
+               spin_unlock_bh(&net->sctp.local_addr_lock);
                if (found)
                        kfree_rcu(addr, rcu);
                break;
@@ -822,7 +792,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
  * Initialize the control inode/socket with a control endpoint data
  * structure.  This endpoint is reserved exclusively for the OOTB processing.
  */
-static int sctp_ctl_sock_init(void)
+static int sctp_ctl_sock_init(struct net *net)
 {
        int err;
        sa_family_t family = PF_INET;
@@ -830,14 +800,14 @@ static int sctp_ctl_sock_init(void)
        if (sctp_get_pf_specific(PF_INET6))
                family = PF_INET6;
 
-       err = inet_ctl_sock_create(&sctp_ctl_sock, family,
-                                  SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
+       err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
+                                  SOCK_SEQPACKET, IPPROTO_SCTP, net);
 
        /* If IPv6 socket could not be created, try the IPv4 socket */
        if (err < 0 && family == PF_INET6)
-               err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
+               err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
                                           SOCK_SEQPACKET, IPPROTO_SCTP,
-                                          &init_net);
+                                          net);
 
        if (err < 0) {
                pr_err("Failed to create the SCTP control socket\n");
@@ -990,7 +960,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
        inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
                         IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
 
-       SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+       SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
        return ip_queue_xmit(skb, &transport->fl);
 }
 
@@ -1063,6 +1033,7 @@ static const struct net_protocol sctp_protocol = {
        .handler     = sctp_rcv,
        .err_handler = sctp_v4_err,
        .no_policy   = 1,
+       .netns_ok    = 1,
 };
 
 /* IPv4 address related functions.  */
@@ -1130,16 +1101,16 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
        return 1;
 }
 
-static inline int init_sctp_mibs(void)
+static inline int init_sctp_mibs(struct net *net)
 {
-       return snmp_mib_init((void __percpu **)sctp_statistics,
+       return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
                             sizeof(struct sctp_mib),
                             __alignof__(struct sctp_mib));
 }
 
-static inline void cleanup_sctp_mibs(void)
+static inline void cleanup_sctp_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)sctp_statistics);
+       snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
@@ -1194,6 +1165,143 @@ static void sctp_v4_del_protocol(void)
        unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
 }
 
+static int sctp_net_init(struct net *net)
+{
+       int status;
+
+       /*
+        * 14. Suggested SCTP Protocol Parameter Values
+        */
+       /* The following protocol parameters are RECOMMENDED:  */
+       /* RTO.Initial              - 3  seconds */
+       net->sctp.rto_initial                   = SCTP_RTO_INITIAL;
+       /* RTO.Min                  - 1  second */
+       net->sctp.rto_min                       = SCTP_RTO_MIN;
+       /* RTO.Max                 -  60 seconds */
+       net->sctp.rto_max                       = SCTP_RTO_MAX;
+       /* RTO.Alpha                - 1/8 */
+       net->sctp.rto_alpha                     = SCTP_RTO_ALPHA;
+       /* RTO.Beta                 - 1/4 */
+       net->sctp.rto_beta                      = SCTP_RTO_BETA;
+
+       /* Valid.Cookie.Life        - 60  seconds */
+       net->sctp.valid_cookie_life             = SCTP_DEFAULT_COOKIE_LIFE;
+
+       /* Whether Cookie Preservative is enabled(1) or not(0) */
+       net->sctp.cookie_preserve_enable        = 1;
+
+       /* Max.Burst                - 4 */
+       net->sctp.max_burst                     = SCTP_DEFAULT_MAX_BURST;
+
+       /* Association.Max.Retrans  - 10 attempts
+        * Path.Max.Retrans         - 5  attempts (per destination address)
+        * Max.Init.Retransmits     - 8  attempts
+        */
+       net->sctp.max_retrans_association       = 10;
+       net->sctp.max_retrans_path              = 5;
+       net->sctp.max_retrans_init              = 8;
+
+       /* Sendbuffer growth        - do per-socket accounting */
+       net->sctp.sndbuf_policy                 = 0;
+
+       /* Rcvbuffer growth         - do per-socket accounting */
+       net->sctp.rcvbuf_policy                 = 0;
+
+       /* HB.interval              - 30 seconds */
+       net->sctp.hb_interval                   = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+
+       /* delayed SACK timeout */
+       net->sctp.sack_timeout                  = SCTP_DEFAULT_TIMEOUT_SACK;
+
+       /* Disable ADDIP by default. */
+       net->sctp.addip_enable = 0;
+       net->sctp.addip_noauth = 0;
+       net->sctp.default_auto_asconf = 0;
+
+       /* Enable PR-SCTP by default. */
+       net->sctp.prsctp_enable = 1;
+
+       /* Disable AUTH by default. */
+       net->sctp.auth_enable = 0;
+
+       /* Set SCOPE policy to enabled */
+       net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
+       /* Set the default rwnd update threshold */
+       net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
+       /* Initialize maximum autoclose timeout. */
+       net->sctp.max_autoclose         = INT_MAX / HZ;
+
+       status = sctp_sysctl_net_register(net);
+       if (status)
+               goto err_sysctl_register;
+
+       /* Allocate and initialise sctp mibs.  */
+       status = init_sctp_mibs(net);
+       if (status)
+               goto err_init_mibs;
+
+       /* Initialize proc fs directory.  */
+       status = sctp_proc_init(net);
+       if (status)
+               goto err_init_proc;
+
+       sctp_dbg_objcnt_init(net);
+
+       /* Initialize the control inode/socket for handling OOTB packets.  */
+       if ((status = sctp_ctl_sock_init(net))) {
+               pr_err("Failed to initialize the SCTP control sock\n");
+               goto err_ctl_sock_init;
+       }
+
+       /* Initialize the local address list. */
+       INIT_LIST_HEAD(&net->sctp.local_addr_list);
+       spin_lock_init(&net->sctp.local_addr_lock);
+       sctp_get_local_addr_list(net);
+
+       /* Initialize the address event list */
+       INIT_LIST_HEAD(&net->sctp.addr_waitq);
+       INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
+       spin_lock_init(&net->sctp.addr_wq_lock);
+       net->sctp.addr_wq_timer.expires = 0;
+       setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler,
+                   (unsigned long)net);
+
+       return 0;
+
+err_ctl_sock_init:
+       sctp_dbg_objcnt_exit(net);
+       sctp_proc_exit(net);
+err_init_proc:
+       cleanup_sctp_mibs(net);
+err_init_mibs:
+       sctp_sysctl_net_unregister(net);
+err_sysctl_register:
+       return status;
+}
+
+static void sctp_net_exit(struct net *net)
+{
+       /* Free the local address list */
+       sctp_free_addr_wq(net);
+       sctp_free_local_addr_list(net);
+
+       /* Free the control endpoint.  */
+       inet_ctl_sock_destroy(net->sctp.ctl_sock);
+
+       sctp_dbg_objcnt_exit(net);
+
+       sctp_proc_exit(net);
+       cleanup_sctp_mibs(net);
+       sctp_sysctl_net_unregister(net);
+}
+
+static struct pernet_operations sctp_net_ops = {
+       .init = sctp_net_init,
+       .exit = sctp_net_exit,
+};
+
 /* Initialize the universe into something sensible.  */
 SCTP_STATIC __init int sctp_init(void)
 {
@@ -1224,62 +1332,9 @@ SCTP_STATIC __init int sctp_init(void)
        if (!sctp_chunk_cachep)
                goto err_chunk_cachep;
 
-       /* Allocate and initialise sctp mibs.  */
-       status = init_sctp_mibs();
+       status = percpu_counter_init(&sctp_sockets_allocated, 0);
        if (status)
-               goto err_init_mibs;
-
-       /* Initialize proc fs directory.  */
-       status = sctp_proc_init();
-       if (status)
-               goto err_init_proc;
-
-       /* Initialize object count debugging.  */
-       sctp_dbg_objcnt_init();
-
-       /*
-        * 14. Suggested SCTP Protocol Parameter Values
-        */
-       /* The following protocol parameters are RECOMMENDED:  */
-       /* RTO.Initial              - 3  seconds */
-       sctp_rto_initial                = SCTP_RTO_INITIAL;
-       /* RTO.Min                  - 1  second */
-       sctp_rto_min                    = SCTP_RTO_MIN;
-       /* RTO.Max                 -  60 seconds */
-       sctp_rto_max                    = SCTP_RTO_MAX;
-       /* RTO.Alpha                - 1/8 */
-       sctp_rto_alpha                  = SCTP_RTO_ALPHA;
-       /* RTO.Beta                 - 1/4 */
-       sctp_rto_beta                   = SCTP_RTO_BETA;
-
-       /* Valid.Cookie.Life        - 60  seconds */
-       sctp_valid_cookie_life          = SCTP_DEFAULT_COOKIE_LIFE;
-
-       /* Whether Cookie Preservative is enabled(1) or not(0) */
-       sctp_cookie_preserve_enable     = 1;
-
-       /* Max.Burst                - 4 */
-       sctp_max_burst                  = SCTP_DEFAULT_MAX_BURST;
-
-       /* Association.Max.Retrans  - 10 attempts
-        * Path.Max.Retrans         - 5  attempts (per destination address)
-        * Max.Init.Retransmits     - 8  attempts
-        */
-       sctp_max_retrans_association    = 10;
-       sctp_max_retrans_path           = 5;
-       sctp_max_retrans_init           = 8;
-
-       /* Sendbuffer growth        - do per-socket accounting */
-       sctp_sndbuf_policy              = 0;
-
-       /* Rcvbuffer growth         - do per-socket accounting */
-       sctp_rcvbuf_policy              = 0;
-
-       /* HB.interval              - 30 seconds */
-       sctp_hb_interval                = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
-
-       /* delayed SACK timeout */
-       sctp_sack_timeout               = SCTP_DEFAULT_TIMEOUT_SACK;
+               goto err_percpu_counter_init;
 
        /* Implementation specific variables. */
 
@@ -1287,9 +1342,6 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
-       /* Initialize maximum autoclose timeout. */
-       sctp_max_autoclose              = INT_MAX / HZ;
-
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
@@ -1376,41 +1428,12 @@ SCTP_STATIC __init int sctp_init(void)
        pr_info("Hash tables configured (established %d bind %d)\n",
                sctp_assoc_hashsize, sctp_port_hashsize);
 
-       /* Disable ADDIP by default. */
-       sctp_addip_enable = 0;
-       sctp_addip_noauth = 0;
-       sctp_default_auto_asconf = 0;
-
-       /* Enable PR-SCTP by default. */
-       sctp_prsctp_enable = 1;
-
-       /* Disable AUTH by default. */
-       sctp_auth_enable = 0;
-
-       /* Set SCOPE policy to enabled */
-       sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
-
-       /* Set the default rwnd update threshold */
-       sctp_rwnd_upd_shift             = SCTP_DEFAULT_RWND_SHIFT;
-
        sctp_sysctl_register();
 
        INIT_LIST_HEAD(&sctp_address_families);
        sctp_v4_pf_init();
        sctp_v6_pf_init();
 
-       /* Initialize the local address list. */
-       INIT_LIST_HEAD(&sctp_local_addr_list);
-       spin_lock_init(&sctp_local_addr_lock);
-       sctp_get_local_addr_list();
-
-       /* Initialize the address event list */
-       INIT_LIST_HEAD(&sctp_addr_waitq);
-       INIT_LIST_HEAD(&sctp_auto_asconf_splist);
-       spin_lock_init(&sctp_addr_wq_lock);
-       sctp_addr_wq_timer.expires = 0;
-       setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
-
        status = sctp_v4_protosw_init();
 
        if (status)
@@ -1420,11 +1443,9 @@ SCTP_STATIC __init int sctp_init(void)
        if (status)
                goto err_v6_protosw_init;
 
-       /* Initialize the control inode/socket for handling OOTB packets.  */
-       if ((status = sctp_ctl_sock_init())) {
-               pr_err("Failed to initialize the SCTP control sock\n");
-               goto err_ctl_sock_init;
-       }
+       status = register_pernet_subsys(&sctp_net_ops);
+       if (status)
+               goto err_register_pernet_subsys;
 
        status = sctp_v4_add_protocol();
        if (status)
@@ -1441,13 +1462,12 @@ out:
 err_v6_add_protocol:
        sctp_v4_del_protocol();
 err_add_protocol:
-       inet_ctl_sock_destroy(sctp_ctl_sock);
-err_ctl_sock_init:
+       unregister_pernet_subsys(&sctp_net_ops);
+err_register_pernet_subsys:
        sctp_v6_protosw_exit();
 err_v6_protosw_init:
        sctp_v4_protosw_exit();
 err_protosw_init:
-       sctp_free_local_addr_list();
        sctp_v4_pf_exit();
        sctp_v6_pf_exit();
        sctp_sysctl_unregister();
@@ -1461,11 +1481,8 @@ err_ehash_alloc:
                   get_order(sctp_assoc_hashsize *
                             sizeof(struct sctp_hashbucket)));
 err_ahash_alloc:
-       sctp_dbg_objcnt_exit();
-       sctp_proc_exit();
-err_init_proc:
-       cleanup_sctp_mibs();
-err_init_mibs:
+       percpu_counter_destroy(&sctp_sockets_allocated);
+err_percpu_counter_init:
        kmem_cache_destroy(sctp_chunk_cachep);
 err_chunk_cachep:
        kmem_cache_destroy(sctp_bucket_cachep);
@@ -1482,18 +1499,13 @@ SCTP_STATIC __exit void sctp_exit(void)
        /* Unregister with inet6/inet layers. */
        sctp_v6_del_protocol();
        sctp_v4_del_protocol();
-       sctp_free_addr_wq();
 
-       /* Free the control endpoint.  */
-       inet_ctl_sock_destroy(sctp_ctl_sock);
+       unregister_pernet_subsys(&sctp_net_ops);
 
        /* Free protosw registrations */
        sctp_v6_protosw_exit();
        sctp_v4_protosw_exit();
 
-       /* Free the local address list.  */
-       sctp_free_local_addr_list();
-
        /* Unregister with socket layer. */
        sctp_v6_pf_exit();
        sctp_v4_pf_exit();
@@ -1508,9 +1520,7 @@ SCTP_STATIC __exit void sctp_exit(void)
                   get_order(sctp_port_hashsize *
                             sizeof(struct sctp_bind_hashbucket)));
 
-       sctp_dbg_objcnt_exit();
-       sctp_proc_exit();
-       cleanup_sctp_mibs();
+       percpu_counter_destroy(&sctp_sockets_allocated);
 
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
index 479a70ef6ff8abe2a59a16ddc672f6638c7b3369..fbe1636309a75ac054de225fe4d1cf245a3923d2 100644 (file)
@@ -198,6 +198,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                             const struct sctp_bind_addr *bp,
                             gfp_t gfp, int vparam_len)
 {
+       struct net *net = sock_net(asoc->base.sk);
        sctp_inithdr_t init;
        union sctp_params addrs;
        size_t chunksize;
@@ -237,7 +238,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
        chunksize += sizeof(ecap_param);
 
-       if (sctp_prsctp_enable)
+       if (net->sctp.prsctp_enable)
                chunksize += sizeof(prsctp_param);
 
        /* ADDIP: Section 4.2.7:
@@ -245,7 +246,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
         *  the ASCONF,the ASCONF-ACK, and the AUTH  chunks in its INIT and
         *  INIT-ACK parameters.
         */
-       if (sctp_addip_enable) {
+       if (net->sctp.addip_enable) {
                extensions[num_ext] = SCTP_CID_ASCONF;
                extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
                num_ext += 2;
@@ -257,7 +258,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        chunksize += vparam_len;
 
        /* Account for AUTH related parameters */
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                /* Add random parameter length*/
                chunksize += sizeof(asoc->c.auth_random);
 
@@ -331,7 +332,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                sctp_addto_param(retval, num_ext, extensions);
        }
 
-       if (sctp_prsctp_enable)
+       if (net->sctp.prsctp_enable)
                sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
 
        if (sp->adaptation_ind) {
@@ -342,7 +343,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        }
 
        /* Add SCTP-AUTH chunks to the parameter list */
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
                                 asoc->c.auth_random);
                if (auth_hmacs)
@@ -1940,7 +1941,7 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
        return 0;
 }
 
-static int sctp_verify_ext_param(union sctp_params param)
+static int sctp_verify_ext_param(struct net *net, union sctp_params param)
 {
        __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
        int have_auth = 0;
@@ -1964,10 +1965,10 @@ static int sctp_verify_ext_param(union sctp_params param)
         * only if ADD-IP is turned on and we are not backward-compatible
         * mode.
         */
-       if (sctp_addip_noauth)
+       if (net->sctp.addip_noauth)
                return 1;
 
-       if (sctp_addip_enable && !have_auth && have_asconf)
+       if (net->sctp.addip_enable && !have_auth && have_asconf)
                return 0;
 
        return 1;
@@ -1976,13 +1977,14 @@ static int sctp_verify_ext_param(union sctp_params param)
 static void sctp_process_ext_param(struct sctp_association *asoc,
                                    union sctp_params param)
 {
+       struct net *net = sock_net(asoc->base.sk);
        __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
        int i;
 
        for (i = 0; i < num_ext; i++) {
                switch (param.ext->chunks[i]) {
                    case SCTP_CID_FWD_TSN:
-                           if (sctp_prsctp_enable &&
+                           if (net->sctp.prsctp_enable &&
                                !asoc->peer.prsctp_capable)
                                    asoc->peer.prsctp_capable = 1;
                            break;
@@ -1990,12 +1992,12 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
                            /* if the peer reports AUTH, assume that he
                             * supports AUTH.
                             */
-                           if (sctp_auth_enable)
+                           if (net->sctp.auth_enable)
                                    asoc->peer.auth_capable = 1;
                            break;
                    case SCTP_CID_ASCONF:
                    case SCTP_CID_ASCONF_ACK:
-                           if (sctp_addip_enable)
+                           if (net->sctp.addip_enable)
                                    asoc->peer.asconf_capable = 1;
                            break;
                    default:
@@ -2081,7 +2083,8 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
  *     SCTP_IERROR_ERROR - stop processing, trigger an ERROR
  *     SCTP_IERROR_NO_ERROR - continue with the chunk
  */
-static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+static sctp_ierror_t sctp_verify_param(struct net *net,
+                                       const struct sctp_association *asoc,
                                        union sctp_params param,
                                        sctp_cid_t cid,
                                        struct sctp_chunk *chunk,
@@ -2110,12 +2113,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_SUPPORTED_EXT:
-               if (!sctp_verify_ext_param(param))
+               if (!sctp_verify_ext_param(net, param))
                        return SCTP_IERROR_ABORT;
                break;
 
        case SCTP_PARAM_SET_PRIMARY:
-               if (sctp_addip_enable)
+               if (net->sctp.addip_enable)
                        break;
                goto fallthrough;
 
@@ -2126,12 +2129,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_FWD_TSN_SUPPORT:
-               if (sctp_prsctp_enable)
+               if (net->sctp.prsctp_enable)
                        break;
                goto fallthrough;
 
        case SCTP_PARAM_RANDOM:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                /* SCTP-AUTH: Secion 6.1
@@ -2148,7 +2151,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_CHUNKS:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                /* SCTP-AUTH: Section 3.2
@@ -2164,7 +2167,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_HMAC_ALGO:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                hmacs = (struct sctp_hmac_algo_param *)param.p;
@@ -2198,7 +2201,7 @@ fallthrough:
 }
 
 /* Verify the INIT packet before we process it.  */
-int sctp_verify_init(const struct sctp_association *asoc,
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
                     sctp_cid_t cid,
                     sctp_init_chunk_t *peer_init,
                     struct sctp_chunk *chunk,
@@ -2245,7 +2248,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
        /* Verify all the variable length parameters */
        sctp_walk_params(param, peer_init, init_hdr.params) {
 
-               result = sctp_verify_param(asoc, param, cid, chunk, errp);
+               result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
                switch (result) {
                    case SCTP_IERROR_ABORT:
                    case SCTP_IERROR_NOMEM:
@@ -2270,6 +2273,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
                      const union sctp_addr *peer_addr,
                      sctp_init_chunk_t *peer_init, gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        union sctp_params param;
        struct sctp_transport *transport;
        struct list_head *pos, *temp;
@@ -2326,7 +2330,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * also give us an option to silently ignore the packet, which
         * is what we'll do here.
         */
-       if (!sctp_addip_noauth &&
+       if (!net->sctp.addip_noauth &&
             (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
                asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
                                                  SCTP_PARAM_DEL_IP |
@@ -2466,6 +2470,7 @@ static int sctp_process_param(struct sctp_association *asoc,
                              const union sctp_addr *peer_addr,
                              gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        union sctp_addr addr;
        int i;
        __u16 sat;
@@ -2494,13 +2499,13 @@ do_addr_param:
                af = sctp_get_af_specific(param_type2af(param.p->type));
                af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
                scope = sctp_scope(peer_addr);
-               if (sctp_in_scope(&addr, scope))
+               if (sctp_in_scope(net, &addr, scope))
                        if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
                                return 0;
                break;
 
        case SCTP_PARAM_COOKIE_PRESERVATIVE:
-               if (!sctp_cookie_preserve_enable)
+               if (!net->sctp.cookie_preserve_enable)
                        break;
 
                stale = ntohl(param.life->lifespan_increment);
@@ -2580,7 +2585,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_SET_PRIMARY:
-               if (!sctp_addip_enable)
+               if (!net->sctp.addip_enable)
                        goto fall_through;
 
                addr_param = param.v + sizeof(sctp_addip_param_t);
@@ -2607,7 +2612,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_FWD_TSN_SUPPORT:
-               if (sctp_prsctp_enable) {
+               if (net->sctp.prsctp_enable) {
                        asoc->peer.prsctp_capable = 1;
                        break;
                }
@@ -2615,7 +2620,7 @@ do_addr_param:
                goto fall_through;
 
        case SCTP_PARAM_RANDOM:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                /* Save peer's random parameter */
@@ -2628,7 +2633,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_HMAC_ALGO:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                /* Save peer's HMAC list */
@@ -2644,7 +2649,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_CHUNKS:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                asoc->peer.peer_chunks = kmemdup(param.p,
index fe99628e1257bd1173dadfa1826a0f2d5f35c7f6..bcfebb91559d1d9e9a0b6472986f626021a3f358 100644 (file)
@@ -251,6 +251,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
        int error;
        struct sctp_transport *transport = (struct sctp_transport *) peer;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
 
        /* Check whether a task is in the sock.  */
 
@@ -271,7 +272,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                goto out_unlock;
 
        /* Run through the state machine.  */
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
                           asoc->state,
                           asoc->ep, asoc,
@@ -291,6 +292,7 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
                                        sctp_event_timeout_t timeout_type)
 {
+       struct net *net = sock_net(asoc->base.sk);
        int error = 0;
 
        sctp_bh_lock_sock(asoc->base.sk);
@@ -312,7 +314,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
                goto out_unlock;
 
        /* Run through the state machine.  */
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(timeout_type),
                           asoc->state, asoc->ep, asoc,
                           (void *)timeout_type, GFP_ATOMIC);
@@ -371,6 +373,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        int error = 0;
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
 
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
@@ -388,7 +391,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        if (transport->dead)
                goto out_unlock;
 
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
                           transport, GFP_ATOMIC);
@@ -408,6 +411,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
        
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
@@ -426,7 +430,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
        if (asoc->base.dead)
                goto out_unlock;
 
-       sctp_do_sm(SCTP_EVENT_T_OTHER,
+       sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
@@ -753,8 +757,10 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
        int err = 0;
 
        if (sctp_outq_sack(&asoc->outqueue, sackh)) {
+               struct net *net = sock_net(asoc->base.sk);
+
                /* There are no more TSNs awaiting SACK.  */
-               err = sctp_do_sm(SCTP_EVENT_T_OTHER,
+               err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                                 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
                                 asoc->state, asoc->ep, asoc, NULL,
                                 GFP_ATOMIC);
@@ -1042,6 +1048,8 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
  */
 static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
+
        /* Send the next asconf chunk from the addip chunk
         * queue.
         */
@@ -1053,7 +1061,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 
                /* Hold the chunk until an ASCONF_ACK is received. */
                sctp_chunk_hold(asconf);
-               if (sctp_primitive_ASCONF(asoc, asconf))
+               if (sctp_primitive_ASCONF(net, asoc, asconf))
                        sctp_chunk_free(asconf);
                else
                        asoc->addip_last_asconf = asconf;
@@ -1089,7 +1097,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
  * If you want to understand all of lksctp, this is a
  * good place to start.
  */
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
               sctp_state_t state,
               struct sctp_endpoint *ep,
               struct sctp_association *asoc,
@@ -1110,12 +1118,12 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
        /* Look up the state function, run it, and then process the
         * side effects.  These three steps are the heart of lksctp.
         */
-       state_fn = sctp_sm_lookup_event(event_type, state, subtype);
+       state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
 
        sctp_init_cmd_seq(&commands);
 
        DEBUG_PRE;
-       status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
+       status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
        DEBUG_POST;
 
        error = sctp_side_effects(event_type, subtype, state,
index 9fca103573508aa6f0880455e0e2c2d66dbb6150..094813b6c3c3cb99dddf407eeb93a2397a676cae 100644 (file)
@@ -66,7 +66,8 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  struct sctp_chunk *chunk,
                                  const void *payload,
@@ -74,36 +75,43 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 static int sctp_eat_data(const struct sctp_association *asoc,
                         struct sctp_chunk *chunk,
                         sctp_cmd_seq_t *commands);
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+                                            const struct sctp_association *asoc,
                                             const struct sctp_chunk *chunk);
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       sctp_cmd_seq_t *commands,
                                       struct sctp_chunk *err_chunk);
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+                                                const struct sctp_endpoint *ep,
                                                 const struct sctp_association *asoc,
                                                 const sctp_subtype_t type,
                                                 void *arg,
                                                 sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+                                            const struct sctp_endpoint *ep,
                                             const struct sctp_association *asoc,
                                             const sctp_subtype_t type,
                                             void *arg,
                                             sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
                                        sctp_cmd_seq_t *commands);
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+                                          sctp_cmd_seq_t *commands,
                                           __be16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport);
 
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
@@ -112,6 +120,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
                                     const size_t paylen);
 
 static sctp_disposition_t sctp_sf_violation_chunklen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -119,6 +128,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -126,6 +136,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_ctsn(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -133,18 +144,21 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_chunk(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
                                     sctp_cmd_seq_t *commands);
 
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    struct sctp_chunk *chunk);
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -204,7 +218,8 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_4_C(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  const sctp_subtype_t type,
                                  void *arg,
@@ -214,7 +229,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* RFC 2960 6.10 Bundling
         *
@@ -222,11 +237,11 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* RFC 2960 10.2 SCTP-to-ULP
@@ -259,8 +274,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -289,7 +304,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -313,21 +329,21 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * with an INIT chunk that is bundled with other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-               SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+               SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
        }
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
        if (chunk->sctp_hdr->vtag != 0)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT chunk has a valid length.
         * Normally, this would cause an ABORT with a Protocol Violation
@@ -335,7 +351,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * just discard the packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the INIT is coming toward a closing socket, we'll send back
         * and ABORT.  Essentially, this catches the race of INIT being
@@ -344,18 +360,18 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * can treat this OOTB
         */
        if (sctp_sstate(ep->base.sk, CLOSING))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
                /* This chunk contains fatal error. It is to be discarded.
                 * Send an ABORT, with causes if there is any.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -366,13 +382,13 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                return SCTP_DISPOSITION_CONSUME;
                        } else {
                                return SCTP_DISPOSITION_NOMEM;
                        }
                } else {
-                       return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+                       return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
                                                    commands);
                }
        }
@@ -484,7 +500,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -496,25 +513,25 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* 6.10 Bundling
         * An endpoint MUST NOT bundle INIT, INIT ACK or
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT-ACK chunk has a valid length */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
 
@@ -526,7 +543,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                 * the association.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -537,7 +554,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                error = SCTP_ERROR_INV_PARAM;
                        }
                }
@@ -554,10 +571,10 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                 * was malformed.
                 */
                if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
                                                asoc, chunk->transport);
        }
 
@@ -633,7 +650,8 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type, void *arg,
                                      sctp_cmd_seq_t *commands)
@@ -650,9 +668,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-               SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+               SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the COOKIE_ECHO chunk has a valid length.
@@ -661,7 +679,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
         * in sctp_unpack_cookie().
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the endpoint is not listening or if the number of associations
         * on the TCP-style socket exceed the max backlog, respond with an
@@ -670,7 +688,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        sk = ep->base.sk;
        if (!sctp_sstate(sk, LISTENING) ||
            (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
         * are in good shape.
@@ -703,13 +721,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                        goto nomem;
 
                case -SCTP_IERROR_STALE_COOKIE:
-                       sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+                       sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
                                                   err_chk_p);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                case -SCTP_IERROR_BAD_SIG:
                default:
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -756,14 +774,14 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
                auth.transport = chunk->transport;
 
-               ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
+               ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
 
                /* We can now safely free the auth_chunk clone */
                kfree_skb(chunk->auth_chunk);
 
                if (ret != SCTP_IERROR_NO_ERROR) {
                        sctp_association_free(new_asoc);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -804,8 +822,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
        if (new_asoc->autoclose)
@@ -856,7 +874,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type, void *arg,
                                      sctp_cmd_seq_t *commands)
@@ -865,13 +884,13 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Verify that the chunk length for the COOKIE-ACK is OK.
         * If we don't do this, any bundled chunks may be junked.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Reset init error count upon receipt of COOKIE-ACK,
@@ -892,8 +911,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
        if (asoc->autoclose)
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
@@ -958,7 +977,8 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
 }
 
 /* Generate a HEARTBEAT packet on the given transport.  */
-sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -972,8 +992,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_DELETE_TCB;
        }
 
@@ -1028,7 +1048,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -1039,11 +1060,11 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
        size_t paylen = 0;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the HEARTBEAT chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* 8.3 The receiver of the HEARTBEAT should immediately
@@ -1095,7 +1116,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1108,12 +1130,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
        unsigned long max_interval;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the HEARTBEAT-ACK chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
                                            sizeof(sctp_sender_hb_info_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
@@ -1171,7 +1193,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 /* Helper function to send out an abort for the restart
  * condition.
  */
-static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
+static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
                                      struct sctp_chunk *init,
                                      sctp_cmd_seq_t *commands)
 {
@@ -1197,18 +1219,18 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
        errhdr->length = htons(len);
 
        /* Assign to the control socket. */
-       ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+       ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
        /* Association is NULL since this may be a restart attack and we
         * want to send back the attacker's vtag.
         */
-       pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len);
+       pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
 
        if (!pkt)
                goto out;
        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
 
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
        /* Discard the rest of the inbound packet. */
        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
@@ -1240,6 +1262,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                                       struct sctp_chunk *init,
                                       sctp_cmd_seq_t *commands)
 {
+       struct net *net = sock_net(new_asoc->base.sk);
        struct sctp_transport *new_addr;
        int ret = 1;
 
@@ -1258,7 +1281,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                            transports) {
                if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
                                        &new_addr->ipaddr)) {
-                       sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+                       sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
                                                   commands);
                        ret = 0;
                        break;
@@ -1358,6 +1381,7 @@ static char sctp_tietags_compare(struct sctp_association *new_asoc,
  * chunk handling.
  */
 static sctp_disposition_t sctp_sf_do_unexpected_init(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -1382,20 +1406,20 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
         * with an INIT chunk that is bundled with other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
        if (chunk->sctp_hdr->vtag != 0)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT chunk has a valid length.
         * In this case, we generate a protocol violation since we have
         * an association established.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -1405,14 +1429,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
                /* This chunk contains fatal error. It is to be discarded.
                 * Send an ABORT, with causes if there is any.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -1421,14 +1445,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                retval = SCTP_DISPOSITION_CONSUME;
                        } else {
                                retval = SCTP_DISPOSITION_NOMEM;
                        }
                        goto cleanup;
                } else {
-                       return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+                       return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
                                                    commands);
                }
        }
@@ -1570,7 +1594,8 @@ cleanup:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -1579,7 +1604,7 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
        /* Call helper to do the real work for both simulataneous and
         * duplicate INIT chunk handling.
         */
-       return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+       return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -1623,7 +1648,8 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1632,7 +1658,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
        /* Call helper to do the real work for both simulataneous and
         * duplicate INIT chunk handling.
         */
-       return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+       return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 
@@ -1645,7 +1671,8 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
  * An unexpected INIT ACK usually indicates the processing of an old or
  * duplicated INIT chunk.
 */
-sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
+                                           const struct sctp_endpoint *ep,
                                            const struct sctp_association *asoc,
                                            const sctp_subtype_t type,
                                            void *arg, sctp_cmd_seq_t *commands)
@@ -1653,10 +1680,10 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
        /* Per the above section, we'll discard the chunk if we have an
         * endpoint.  If this is an OOTB INIT-ACK, treat it as such.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-               return sctp_sf_ootb(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
+               return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
        else
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 }
 
 /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
@@ -1664,7 +1691,8 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
  * Section 5.2.4
  *  A)  In this case, the peer may have restarted.
  */
-static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1700,7 +1728,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
         * its peer.
        */
        if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
-               disposition = sctp_sf_do_9_2_reshutack(ep, asoc,
+               disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
                                SCTP_ST_CHUNK(chunk->chunk_hdr->type),
                                chunk, commands);
                if (SCTP_DISPOSITION_NOMEM == disposition)
@@ -1763,7 +1791,8 @@ nomem:
  *      after responding to the local endpoint's INIT
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1784,7 +1813,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
        repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1833,7 +1862,8 @@ nomem:
  *     but a new tag of its own.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1854,7 +1884,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
  *    enter the ESTABLISHED state, if it has not already done so.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1876,7 +1907,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
                sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                                SCTP_STATE(SCTP_STATE_ESTABLISHED));
-               SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
                sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
                                SCTP_NULL());
 
@@ -1948,7 +1979,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1967,7 +1999,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
         * done later.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
@@ -2001,12 +2033,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
                        goto nomem;
 
                case -SCTP_IERROR_STALE_COOKIE:
-                       sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+                       sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
                                                   err_chk_p);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                case -SCTP_IERROR_BAD_SIG:
                default:
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -2017,27 +2049,27 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 
        switch (action) {
        case 'A': /* Association restart. */
-               retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'B': /* Collision case B. */
-               retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'C': /* Collision case C. */
-               retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'D': /* Collision case D. */
-               retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        default: /* Discard packet for all others. */
-               retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                break;
        }
 
@@ -2063,6 +2095,7 @@ nomem:
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_pending_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -2072,7 +2105,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2085,7 +2118,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2094,9 +2127,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2104,7 +2137,8 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2113,7 +2147,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2126,7 +2160,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2135,7 +2169,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Stop the T2-shutdown timer. */
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2145,7 +2179,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2154,6 +2188,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -2163,7 +2198,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
        /* The same T2 timer, so we should be able to use
         * common function with the SHUTDOWN-SENT state.
         */
-       return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2180,7 +2215,8 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2190,13 +2226,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
        sctp_errhdr_t *err;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ERROR chunk has a valid length.
         * The parameter walking depends on this as well.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Process the error here */
@@ -2206,7 +2242,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
         */
        sctp_walk_errors(err, chunk->chunk_hdr) {
                if (SCTP_ERROR_STALE_COOKIE == err->cause)
-                       return sctp_sf_do_5_2_6_stale(ep, asoc, type,
+                       return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
                                                        arg, commands);
        }
 
@@ -2215,7 +2251,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
         * we are discarding the packet, there should be no adverse
         * affects.
         */
-       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2243,7 +2279,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+                                                const struct sctp_endpoint *ep,
                                                 const struct sctp_association *asoc,
                                                 const sctp_subtype_t type,
                                                 void *arg,
@@ -2365,7 +2402,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2374,7 +2412,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2387,7 +2425,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2396,12 +2434,13 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2418,7 +2457,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
                sctp_errhdr_t *err;
                sctp_walk_errors(err, chunk->chunk_hdr);
                if ((void *)err != (void *)chunk->chunk_end)
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
        }
@@ -2426,8 +2465,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
        /* ASSOC_FAILED will DELETE_TCB. */
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return SCTP_DISPOSITION_ABORT;
 }
@@ -2437,7 +2476,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
  *
  * See sctp_sf_do_9_1_abort() above.
  */
-sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -2448,7 +2488,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
        __be16 error = SCTP_ERROR_NO_ERROR;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2461,27 +2501,28 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* See if we have an error cause code in the chunk.  */
        len = ntohs(chunk->chunk_hdr->length);
        if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-       return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+       return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
                                      chunk->transport);
 }
 
 /*
  * Process an incoming ICMP as an ABORT.  (COOKIE-WAIT state)
  */
-sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
                                        sctp_cmd_seq_t *commands)
 {
-       return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+       return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
                                      ENOPROTOOPT, asoc,
                                      (struct sctp_transport *)arg);
 }
@@ -2489,7 +2530,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
 /*
  * Process an ABORT.  (COOKIE-ECHOED state)
  */
-sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
+                                              const struct sctp_endpoint *ep,
                                               const struct sctp_association *asoc,
                                               const sctp_subtype_t type,
                                               void *arg,
@@ -2498,7 +2540,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2506,7 +2548,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
  *
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+                                          sctp_cmd_seq_t *commands,
                                           __be16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport)
@@ -2514,7 +2557,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
        SCTP_DEBUG_PRINTK("ABORT received (INIT).\n");
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
@@ -2557,7 +2600,8 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -2570,12 +2614,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk,
                                      sizeof(struct sctp_shutdown_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Convert the elaborate header.  */
@@ -2595,7 +2639,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
         * When a peer sends a SHUTDOWN, SCTP delivers this notification to
@@ -2619,7 +2663,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
        disposition = SCTP_DISPOSITION_CONSUME;
 
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
                                                          arg, commands);
        }
 
@@ -2645,7 +2689,8 @@ out:
  * The Cumulative TSN Ack of the received SHUTDOWN chunk
  * MUST be processed.
  */
-sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -2656,12 +2701,12 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk,
                                      sizeof(struct sctp_shutdown_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
@@ -2678,7 +2723,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* verify, by checking the Cumulative TSN Ack field of the
         * chunk, that all its outstanding DATA chunks have been
@@ -2697,7 +2742,8 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
  * that belong to this association, it should discard the INIT chunk and
  * retransmit the SHUTDOWN ACK chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -2708,7 +2754,7 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
 
        /* Make sure that the chunk has a valid length */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Since we are not going to really process this INIT, there
@@ -2760,7 +2806,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type,
                                      void *arg,
@@ -2771,10 +2818,10 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
        u32 lowest_tsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        cwr = (sctp_cwrhdr_t *) chunk->skb->data;
@@ -2815,7 +2862,8 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecne(struct net *net,
+                                  const struct sctp_endpoint *ep,
                                   const struct sctp_association *asoc,
                                   const sctp_subtype_t type,
                                   void *arg,
@@ -2825,10 +2873,10 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        ecne = (sctp_ecnehdr_t *) chunk->skb->data;
@@ -2871,7 +2919,8 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2884,11 +2933,11 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        error = sctp_eat_data(asoc, chunk, commands );
@@ -2897,16 +2946,16 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
                break;
        case SCTP_IERROR_HIGH_TSN:
        case SCTP_IERROR_BAD_STREAM:
-               SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+               SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
                goto discard_noforce;
        case SCTP_IERROR_DUP_TSN:
        case SCTP_IERROR_IGNORE_TSN:
-               SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+               SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
                goto discard_force;
        case SCTP_IERROR_NO_DATA:
                goto consume;
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+               return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                        (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
        default:
                BUG();
@@ -2992,7 +3041,8 @@ consume:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -3004,11 +3054,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        error = sctp_eat_data(asoc, chunk, commands );
@@ -3022,7 +3072,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
        case SCTP_IERROR_NO_DATA:
                goto consume;
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+               return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                        (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
        default:
                BUG();
@@ -3082,7 +3132,8 @@ consume:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3093,18 +3144,18 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Pull the SACK chunk from the data buffer */
        sackh = sctp_sm_pull_sack(chunk);
        /* Was this a bogus SACK? */
        if (!sackh)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        chunk->subh.sack_hdr = sackh;
        ctsn = ntohl(sackh->cum_tsn_ack);
 
@@ -3125,7 +3176,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* Return this SACK for further processing.  */
        sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
@@ -3154,7 +3205,8 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3164,7 +3216,7 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_chunk *abort;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an ABORT. The T bit will be set if the asoc
@@ -3188,9 +3240,9 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
        }
 
@@ -3205,7 +3257,8 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_operr_notify(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3215,15 +3268,15 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
        sctp_errhdr_t *err;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ERROR chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        sctp_walk_errors(err, chunk->chunk_hdr);
        if ((void *)err != (void *)chunk->chunk_end)
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err, commands);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
@@ -3242,7 +3295,8 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3253,11 +3307,11 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* 10.2 H) SHUTDOWN COMPLETE notification
         *
@@ -3290,8 +3344,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
 
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
 
        /* ...and remove all record of the association. */
@@ -3324,7 +3378,8 @@ nomem:
  *    receiver of the OOTB packet shall discard the OOTB packet and take
  *    no further action.
  */
-sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ootb(struct net *net,
+                               const struct sctp_endpoint *ep,
                                const struct sctp_association *asoc,
                                const sctp_subtype_t type,
                                void *arg,
@@ -3338,13 +3393,13 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
        int ootb_shut_ack = 0;
        int ootb_cookie_ack = 0;
 
-       SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
        ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
        do {
                /* Report violation if the chunk is less then minimal */
                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
                /* Now that we know we at least have a chunk header,
@@ -3359,7 +3414,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                 *   sending an ABORT of its own.
                 */
                if (SCTP_CID_ABORT == ch->type)
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
                 * or a COOKIE ACK the SCTP Packet should be silently
@@ -3381,18 +3436,18 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                /* Report violation if chunk len overflows */
                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
                if (ch_end > skb_tail_pointer(skb))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
                ch = (sctp_chunkhdr_t *) ch_end;
        } while (ch_end < skb_tail_pointer(skb));
 
        if (ootb_shut_ack)
-               return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+               return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
        else if (ootb_cookie_ack)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        else
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -3416,7 +3471,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+                                            const struct sctp_endpoint *ep,
                                             const struct sctp_association *asoc,
                                             const sctp_subtype_t type,
                                             void *arg,
@@ -3426,7 +3482,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_chunk *shut;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an SHUTDOWN_COMPLETE.
@@ -3450,19 +3506,19 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
                /* If the chunk length is invalid, we don't want to process
                 * the reset of the packet.
                 */
                if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* We need to discard the rest of the packet to prevent
                 * potential bomming attacks from additional bundled chunks.
                 * This is documented in SCTP Threats ID.
                 */
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        return SCTP_DISPOSITION_NOMEM;
@@ -3479,7 +3535,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
  *   chunks. --piggy ]
  *
  */
-sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type,
                                      void *arg,
@@ -3489,7 +3546,7 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
 
        /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Although we do have an association in this case, it corresponds
@@ -3497,13 +3554,14 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
         * packet and the state function that handles OOTB SHUTDOWN_ACK is
         * called with a NULL association.
         */
-       SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
-       return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands);
+       return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
 }
 
 /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.  */
-sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type, void *arg,
                                     sctp_cmd_seq_t *commands)
@@ -3519,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* ADD-IP: Section 4.1.1
@@ -3528,12 +3586,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
         * is received unauthenticated it MUST be silently discarded as
         * described in [I-D.ietf-tsvwg-sctp-auth].
         */
-       if (!sctp_addip_noauth && !chunk->auth)
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+       if (!net->sctp.addip_noauth && !chunk->auth)
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ASCONF ADDIP chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
@@ -3542,7 +3600,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        addr_param = (union sctp_addr_param *)hdr->params;
        length = ntohs(addr_param->p.length);
        if (length < sizeof(sctp_paramhdr_t))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)addr_param, commands);
 
        /* Verify the ASCONF chunk before processing it. */
@@ -3550,7 +3608,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
                            (sctp_paramhdr_t *)((void *)addr_param + length),
                            (void *)chunk->chunk_end,
                            &err_param))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err_param, commands);
 
        /* ADDIP 5.2 E1) Compare the value of the serial number to the value
@@ -3630,7 +3688,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
  * When building TLV parameters for the ASCONF Chunk that will add or
  * delete IP addresses the D0 to D13 rules should be applied:
  */
-sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+                                        const struct sctp_endpoint *ep,
                                         const struct sctp_association *asoc,
                                         const sctp_subtype_t type, void *arg,
                                         sctp_cmd_seq_t *commands)
@@ -3645,7 +3704,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(asconf_ack, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* ADD-IP, Section 4.1.2:
@@ -3654,12 +3713,12 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
         * is received unauthenticated it MUST be silently discarded as
         * described in [I-D.ietf-tsvwg-sctp-auth].
         */
-       if (!sctp_addip_noauth && !asconf_ack->auth)
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+       if (!net->sctp.addip_noauth && !asconf_ack->auth)
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ADDIP chunk has a valid length.  */
        if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
@@ -3670,7 +3729,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
            (sctp_paramhdr_t *)addip_hdr->params,
            (void *)asconf_ack->chunk_end,
            &err_param))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)err_param, commands);
 
        if (last_asconf) {
@@ -3705,8 +3764,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -3739,8 +3798,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -3761,7 +3820,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -3776,12 +3836,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the FORWARD_TSN chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3828,6 +3888,7 @@ discard_noforce:
 }
 
 sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -3843,12 +3904,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the FORWARD_TSN chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3915,7 +3976,8 @@ gen_shutdown:
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    struct sctp_chunk *chunk)
@@ -3988,7 +4050,8 @@ nomem:
        return SCTP_IERROR_NOMEM;
 }
 
-sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_auth(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -4001,21 +4064,21 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
 
        /* Make sure that the peer has AUTH capable */
        if (!asoc->peer.auth_capable)
-               return sctp_sf_unk_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the AUTH chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-       error = sctp_sf_authenticate(ep, asoc, type, chunk);
+       error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
        switch (error) {
        case SCTP_IERROR_AUTH_BAD_HMAC:
                /* Generate the ERROR chunk and discard the rest
@@ -4032,10 +4095,10 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
                /* Fall Through */
        case SCTP_IERROR_AUTH_BAD_KEYID:
        case SCTP_IERROR_BAD_SIG:
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        case SCTP_IERROR_NOMEM:
@@ -4084,7 +4147,8 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -4097,20 +4161,20 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
        SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
 
        if (!sctp_vtag_verify(unk_chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the chunk has a valid length.
         * Since we don't know the chunk type, we use a general
         * chunkhdr structure to make a comparison.
         */
        if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        switch (type.chunk & SCTP_CID_ACTION_MASK) {
        case SCTP_CID_ACTION_DISCARD:
                /* Discard the packet.  */
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                break;
        case SCTP_CID_ACTION_DISCARD_ERR:
                /* Generate an ERROR chunk as response. */
@@ -4125,7 +4189,7 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
                }
 
                /* Discard the packet.  */
-               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
                break;
        case SCTP_CID_ACTION_SKIP:
@@ -4167,7 +4231,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
+                                        const struct sctp_endpoint *ep,
                                         const struct sctp_association *asoc,
                                         const sctp_subtype_t type,
                                         void *arg,
@@ -4180,7 +4245,7 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
         * chunkhdr structure to make a comparison.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
@@ -4205,13 +4270,14 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_pdiscard(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
                                    sctp_cmd_seq_t *commands)
 {
-       SCTP_INC_STATS(SCTP_MIB_IN_PKT_DISCARDS);
+       SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 
        return SCTP_DISPOSITION_CONSUME;
@@ -4232,7 +4298,8 @@ sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
  * We simply tag the chunk as a violation.  The state machine will log
  * the violation and continue.
  */
-sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_violation(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -4242,7 +4309,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
 
        /* Make sure that the chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        return SCTP_DISPOSITION_VIOLATION;
@@ -4252,6 +4319,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
  * Common function to handle a protocol violation.
  */
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
@@ -4302,7 +4370,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
                }
 
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
                if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
                        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -4316,10 +4384,10 @@ static sctp_disposition_t sctp_sf_abort_violation(
                                        SCTP_ERROR(ECONNABORTED));
                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                        SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                }
        } else {
-               packet = sctp_ootb_pkt_new(asoc, chunk);
+               packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
                if (!packet)
                        goto nomem_pkt;
@@ -4334,13 +4402,13 @@ static sctp_disposition_t sctp_sf_abort_violation(
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                        SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+       sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
        return SCTP_DISPOSITION_ABORT;
 
 nomem_pkt:
@@ -4369,6 +4437,7 @@ nomem:
  * Generate an  ABORT chunk and terminate the association.
  */
 static sctp_disposition_t sctp_sf_violation_chunklen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4377,7 +4446,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 {
        static const char err_str[]="The following chunk had invalid length:";
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -4388,6 +4457,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
  * the length is considered as invalid.
  */
 static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4407,17 +4477,17 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
                goto nomem;
 
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
                        SCTP_ERROR(ECONNABORTED));
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+       sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
        return SCTP_DISPOSITION_ABORT;
 nomem:
        return SCTP_DISPOSITION_NOMEM;
@@ -4430,6 +4500,7 @@ nomem:
  * error code.
  */
 static sctp_disposition_t sctp_sf_violation_ctsn(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4438,7 +4509,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
 {
        static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -4449,6 +4520,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
  * on the path and we may not want to continue this communication.
  */
 static sctp_disposition_t sctp_sf_violation_chunk(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4458,9 +4530,9 @@ static sctp_disposition_t sctp_sf_violation_chunk(
        static const char err_str[]="The following chunk violates protocol:";
 
        if (!asoc)
-               return sctp_sf_violation(ep, asoc, type, arg, commands);
+               return sctp_sf_violation(net, ep, asoc, type, arg, commands);
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 /***************************************************************************
@@ -4523,7 +4595,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
  *
  * The return value is a disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -4634,7 +4707,8 @@ nomem:
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -4673,6 +4747,7 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4694,7 +4769,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
 
        disposition = SCTP_DISPOSITION_CONSUME;
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
                                                            arg, commands);
        }
        return disposition;
@@ -4728,6 +4803,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4759,14 +4835,15 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return retval;
 }
 
 /* We tried an illegal operation on an association which is closed.  */
-sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_closed(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -4779,7 +4856,8 @@ sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
 /* We tried an illegal operation on an association which is shutting
  * down.
  */
-sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
+                                         const struct sctp_endpoint *ep,
                                          const struct sctp_association *asoc,
                                          const sctp_subtype_t type,
                                          void *arg,
@@ -4805,6 +4883,7 @@ sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4817,7 +4896,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -4839,6 +4918,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4847,7 +4927,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4865,6 +4945,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4884,7 +4965,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
        /* Even if we can't send the ABORT due to low memory delete the
         * TCB.  This is a departure from our typical NOMEM handling.
@@ -4914,6 +4995,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4923,7 +5005,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4939,6 +5021,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4949,7 +5032,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4965,6 +5048,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4979,7 +5063,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4995,6 +5079,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5004,7 +5089,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
        /* The same T2 timer, so we should be able to use
         * common function with the SHUTDOWN-SENT state.
         */
-       return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -5030,6 +5115,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
  *   association on which a heartbeat should be issued.
  */
 sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
+                                       struct net *net,
                                        const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
@@ -5061,7 +5147,8 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
  * When an endpoint has an ASCONF signaled change to be sent to the
  * remote endpoint it should do A1 to A9
  */
-sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5082,6 +5169,7 @@ sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
  * The return value is the disposition of the primitive.
  */
 sctp_disposition_t sctp_sf_ignore_primitive(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5103,6 +5191,7 @@ sctp_disposition_t sctp_sf_ignore_primitive(
  * retransmit, the stack will immediately send up this notification.
  */
 sctp_disposition_t sctp_sf_do_no_pending_tsn(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5134,6 +5223,7 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn(
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5203,6 +5293,7 @@ nomem:
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5221,11 +5312,11 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
         */
        if (chunk) {
                if (!sctp_vtag_verify(chunk, asoc))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* Make sure that the SHUTDOWN chunk has a valid length. */
                if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                          commands);
        }
 
@@ -5273,7 +5364,8 @@ nomem:
  *
  * The return value is the disposition of the event.
  */
-sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ignore_other(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5298,7 +5390,8 @@ sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5306,7 +5399,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 {
        struct sctp_transport *transport = arg;
 
-       SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
                if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
@@ -5327,8 +5420,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
                        /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
-                       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+                       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                        return SCTP_DISPOSITION_DELETE_TCB;
                }
        }
@@ -5384,13 +5477,14 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
  * allow. However, an SCTP transmitter MUST NOT be more aggressive than
  * the following algorithms allow.
  */
-sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
                                       sctp_cmd_seq_t *commands)
 {
-       SCTP_INC_STATS(SCTP_MIB_DELAY_SACK_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
        return SCTP_DISPOSITION_CONSUME;
 }
@@ -5414,7 +5508,8 @@ sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5425,7 +5520,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
        int attempts = asoc->init_err_counter + 1;
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
-       SCTP_INC_STATS(SCTP_MIB_T1_INIT_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
 
        if (attempts <= asoc->max_init_attempts) {
                bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
@@ -5475,7 +5570,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5485,7 +5581,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
        int attempts = asoc->init_err_counter + 1;
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
-       SCTP_INC_STATS(SCTP_MIB_T1_COOKIE_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
 
        if (attempts <= asoc->max_init_attempts) {
                repl = sctp_make_cookie_echo(asoc, NULL);
@@ -5523,7 +5619,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
  * the T2-Shutdown timer,  giving its peer ample opportunity to transmit
  * all of its queued DATA chunks that have not yet been sent.
  */
-sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5532,7 +5629,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
        struct sctp_chunk *reply = NULL;
 
        SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
-       SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
 
        ((struct sctp_association *)asoc)->shutdown_retries++;
 
@@ -5542,8 +5639,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
                /* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_DELETE_TCB;
        }
 
@@ -5592,6 +5689,7 @@ nomem:
  * If the T4 RTO timer expires the endpoint should do B1 to B5
  */
 sctp_disposition_t sctp_sf_t4_timer_expire(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5601,7 +5699,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
        struct sctp_chunk *chunk = asoc->addip_last_asconf;
        struct sctp_transport *transport = chunk->transport;
 
-       SCTP_INC_STATS(SCTP_MIB_T4_RTO_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
 
        /* ADDIP 4.1 B1) Increment the error counters and perform path failure
         * detection on the appropriate destination address as defined in
@@ -5626,8 +5724,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
                                SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -5662,7 +5760,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
  * At the expiration of this timer the sender SHOULD abort the association
  * by sending an ABORT chunk.
  */
-sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5671,7 +5770,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
        struct sctp_chunk *reply = NULL;
 
        SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
-       SCTP_INC_STATS(SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
 
        reply = sctp_make_abort(asoc, NULL, 0);
        if (!reply)
@@ -5683,8 +5782,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return SCTP_DISPOSITION_DELETE_TCB;
 nomem:
@@ -5697,6 +5796,7 @@ nomem:
  * the user.  So this routine looks same as sctp_sf_do_9_2_prm_shutdown().
  */
 sctp_disposition_t sctp_sf_autoclose_timer_expire(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5705,7 +5805,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
 {
        int disposition;
 
-       SCTP_INC_STATS(SCTP_MIB_AUTOCLOSE_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
 
        /* From 9.2 Shutdown of an Association
         * Upon receipt of the SHUTDOWN primitive from its upper
@@ -5720,7 +5820,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
 
        disposition = SCTP_DISPOSITION_CONSUME;
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
                                                            arg, commands);
        }
        return disposition;
@@ -5738,7 +5838,8 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_not_impl(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -5755,7 +5856,8 @@ sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_bug(struct net *net,
+                              const struct sctp_endpoint *ep,
                               const struct sctp_association *asoc,
                               const sctp_subtype_t type,
                               void *arg,
@@ -5775,7 +5877,8 @@ sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5817,7 +5920,8 @@ static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
 /* Create an ABORT packet to be sent as a response, with the specified
  * error causes.
  */
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  struct sctp_chunk *chunk,
                                  const void *payload,
@@ -5826,7 +5930,7 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
        struct sctp_chunk *abort;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an ABORT.
@@ -5858,7 +5962,8 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 }
 
 /* Allocate a packet for responding in the OOTB conditions.  */
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+                                            const struct sctp_association *asoc,
                                             const struct sctp_chunk *chunk)
 {
        struct sctp_packet *packet;
@@ -5911,7 +6016,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
        }
 
        /* Make a transport for the bucket, Eliza... */
-       transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
+       transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
        if (!transport)
                goto nomem;
 
@@ -5919,7 +6024,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
         * the source address.
         */
        sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
-                            sctp_sk(sctp_get_ctl_sock()));
+                            sctp_sk(net->sctp.ctl_sock));
 
        packet = sctp_packet_init(&transport->packet, transport, sport, dport);
        packet = sctp_packet_config(packet, vtag, 0);
@@ -5937,7 +6042,8 @@ void sctp_ootb_pkt_free(struct sctp_packet *packet)
 }
 
 /* Send a stale cookie error when a invalid COOKIE ECHO chunk is found  */
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       sctp_cmd_seq_t *commands,
@@ -5946,7 +6052,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
 
        if (err_chunk) {
-               packet = sctp_ootb_pkt_new(asoc, chunk);
+               packet = sctp_ootb_pkt_new(net, asoc, chunk);
                if (packet) {
                        struct sctp_signed_cookie *cookie;
 
@@ -5959,7 +6065,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
                        sctp_packet_append_chunk(packet, err_chunk);
                        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                        SCTP_PACKET(packet));
-                       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                } else
                        sctp_chunk_free (err_chunk);
        }
@@ -5979,6 +6085,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
        __u32 tsn;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
        u16 ssn;
        u16 sid;
        u8 ordered = 0;
@@ -6109,8 +6216,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_DATA));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_IERROR_NO_DATA;
        }
 
@@ -6120,9 +6227,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * if we renege and the chunk arrives again.
         */
        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-               SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
        else {
-               SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
                ordered = 1;
        }
 
index 7c211a7f90f4d065eec82baa0cb751373e7eb0be..84d98d8a5a7417bd92ea919c56e0f8033073a6c4 100644 (file)
@@ -59,7 +59,8 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES];
 static const sctp_sm_table_entry_t
 timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES];
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+                                                           sctp_cid_t cid,
                                                            sctp_state_t state);
 
 
@@ -82,13 +83,14 @@ static const sctp_sm_table_entry_t bug = {
        rtn;                                                            \
 })
 
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net,
+                                                 sctp_event_t event_type,
                                                  sctp_state_t state,
                                                  sctp_subtype_t event_subtype)
 {
        switch (event_type) {
        case SCTP_EVENT_T_CHUNK:
-               return sctp_chunk_event_lookup(event_subtype.chunk, state);
+               return sctp_chunk_event_lookup(net, event_subtype.chunk, state);
        case SCTP_EVENT_T_TIMEOUT:
                return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
                                 timeout_event_table);
@@ -906,7 +908,8 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
        TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+                                                           sctp_cid_t cid,
                                                            sctp_state_t state)
 {
        if (state > SCTP_STATE_MAX)
@@ -915,12 +918,12 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
        if (cid <= SCTP_CID_BASE_MAX)
                return &chunk_event_table[cid][state];
 
-       if (sctp_prsctp_enable) {
+       if (net->sctp.prsctp_enable) {
                if (cid == SCTP_CID_FWD_TSN)
                        return &prsctp_chunk_event_table[0][state];
        }
 
-       if (sctp_addip_enable) {
+       if (net->sctp.addip_enable) {
                if (cid == SCTP_CID_ASCONF)
                        return &addip_chunk_event_table[0][state];
 
@@ -928,7 +931,7 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
                        return &addip_chunk_event_table[1][state];
        }
 
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                if (cid == SCTP_CID_AUTH)
                        return &auth_chunk_event_table[0][state];
        }
index 5e259817a7f34cd4a183139fe9c4bf5ee2ab6689..d37d24ff197f094d5200fd9e51a692a08112157e 100644 (file)
@@ -427,6 +427,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 static int sctp_send_asconf(struct sctp_association *asoc,
                            struct sctp_chunk *chunk)
 {
+       struct net      *net = sock_net(asoc->base.sk);
        int             retval = 0;
 
        /* If there is an outstanding ASCONF chunk, queue it for later
@@ -439,7 +440,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
 
        /* Hold the chunk until an ASCONF_ACK is received. */
        sctp_chunk_hold(chunk);
-       retval = sctp_primitive_ASCONF(asoc, chunk);
+       retval = sctp_primitive_ASCONF(net, asoc, chunk);
        if (retval)
                sctp_chunk_free(chunk);
        else
@@ -515,6 +516,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
                                   struct sockaddr      *addrs,
                                   int                  addrcnt)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock                *sp;
        struct sctp_endpoint            *ep;
        struct sctp_association         *asoc;
@@ -529,7 +531,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
        int                             i;
        int                             retval = 0;
 
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return retval;
 
        sp = sctp_sk(sk);
@@ -717,6 +719,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
                                   struct sockaddr      *addrs,
                                   int                  addrcnt)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock        *sp;
        struct sctp_endpoint    *ep;
        struct sctp_association *asoc;
@@ -732,7 +735,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
        int                     stored = 0;
 
        chunk = NULL;
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return retval;
 
        sp = sctp_sk(sk);
@@ -1050,6 +1053,7 @@ static int __sctp_connect(struct sock* sk,
                          int addrs_size,
                          sctp_assoc_t *assoc_id)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
        struct sctp_association *asoc = NULL;
@@ -1200,7 +1204,7 @@ static int __sctp_connect(struct sock* sk,
                        goto out_free;
        }
 
-       err = sctp_primitive_ASSOCIATE(asoc, NULL);
+       err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
        if (err < 0) {
                goto out_free;
        }
@@ -1458,6 +1462,7 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
  */
 SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
        struct list_head *pos, *temp;
@@ -1499,9 +1504,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
                        if (chunk)
-                               sctp_primitive_ABORT(asoc, chunk);
+                               sctp_primitive_ABORT(net, asoc, chunk);
                } else
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
        }
 
        /* On a TCP-style socket, block for at most linger_time if set. */
@@ -1569,6 +1574,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
 SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                             struct msghdr *msg, size_t msg_len)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
        struct sctp_association *new_asoc=NULL, *asoc=NULL;
@@ -1714,7 +1720,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (sinfo_flags & SCTP_EOF) {
                        SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
                                          asoc);
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
                        err = 0;
                        goto out_unlock;
                }
@@ -1727,7 +1733,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                        }
 
                        SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
-                       sctp_primitive_ABORT(asoc, chunk);
+                       sctp_primitive_ABORT(net, asoc, chunk);
                        err = 0;
                        goto out_unlock;
                }
@@ -1900,7 +1906,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        /* Auto-connect, if we aren't connected already. */
        if (sctp_state(asoc, CLOSED)) {
-               err = sctp_primitive_ASSOCIATE(asoc, NULL);
+               err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
                if (err < 0)
                        goto out_free;
                SCTP_DEBUG_PRINTK("We associated primitively.\n");
@@ -1928,7 +1934,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
         * works that way today.  Keep it that way or this
         * breaks.
         */
-       err = sctp_primitive_SEND(asoc, datamsg);
+       err = sctp_primitive_SEND(net, asoc, datamsg);
        /* Did the lower layer accept the chunk? */
        if (err)
                sctp_datamsg_free(datamsg);
@@ -2320,7 +2326,9 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        int error;
 
        if (params->spp_flags & SPP_HB_DEMAND && trans) {
-               error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans);
+               struct net *net = sock_net(trans->asoc->base.sk);
+
+               error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
                if (error)
                        return error;
        }
@@ -3033,6 +3041,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
                                             unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock        *sp;
        struct sctp_association *asoc = NULL;
        struct sctp_setpeerprim prim;
@@ -3042,7 +3051,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
 
        sp = sctp_sk(sk);
 
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return -EPERM;
 
        if (optlen != sizeof(struct sctp_setpeerprim))
@@ -3279,9 +3288,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunk val;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authchunk))
@@ -3311,11 +3321,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmacalgo *hmacs;
        u32 idents;
        int err;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen < sizeof(struct sctp_hmacalgo))
@@ -3348,11 +3359,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
                                    char __user *optval,
                                    unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkey *authkey;
        struct sctp_association *asoc;
        int ret;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen <= sizeof(struct sctp_authkey))
@@ -3389,10 +3401,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authkeyid))
@@ -3417,10 +3430,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
                                   char __user *optval,
                                   unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authkeyid))
@@ -3471,7 +3485,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
                sp->do_auto_asconf = 0;
        } else if (val && !sp->do_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
-                   &sctp_auto_asconf_splist);
+                   &sock_net(sk)->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        }
        return 0;
@@ -3843,6 +3857,7 @@ out:
  */
 SCTP_STATIC int sctp_init_sock(struct sock *sk)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_sock *sp;
 
@@ -3872,7 +3887,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        sp->default_timetolive = 0;
 
        sp->default_rcv_context = 0;
-       sp->max_burst = sctp_max_burst;
+       sp->max_burst = net->sctp.max_burst;
 
        /* Initialize default setup parameters. These parameters
         * can be modified with the SCTP_INITMSG socket option or
@@ -3880,24 +3895,24 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         */
        sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
        sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
-       sp->initmsg.sinit_max_attempts   = sctp_max_retrans_init;
-       sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
+       sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
+       sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
 
        /* Initialize default RTO related parameters.  These parameters can
         * be modified for with the SCTP_RTOINFO socket option.
         */
-       sp->rtoinfo.srto_initial = sctp_rto_initial;
-       sp->rtoinfo.srto_max     = sctp_rto_max;
-       sp->rtoinfo.srto_min     = sctp_rto_min;
+       sp->rtoinfo.srto_initial = net->sctp.rto_initial;
+       sp->rtoinfo.srto_max     = net->sctp.rto_max;
+       sp->rtoinfo.srto_min     = net->sctp.rto_min;
 
        /* Initialize default association related parameters. These parameters
         * can be modified with the SCTP_ASSOCINFO socket option.
         */
-       sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association;
+       sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
        sp->assocparams.sasoc_number_peer_destinations = 0;
        sp->assocparams.sasoc_peer_rwnd = 0;
        sp->assocparams.sasoc_local_rwnd = 0;
-       sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
+       sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
 
        /* Initialize default event subscriptions. By default, all the
         * options are off.
@@ -3907,10 +3922,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        /* Default Peer Address Parameters.  These defaults can
         * be modified via SCTP_PEER_ADDR_PARAMS
         */
-       sp->hbinterval  = sctp_hb_interval;
-       sp->pathmaxrxt  = sctp_max_retrans_path;
+       sp->hbinterval  = net->sctp.hb_interval;
+       sp->pathmaxrxt  = net->sctp.max_retrans_path;
        sp->pathmtu     = 0; // allow default discovery
-       sp->sackdelay   = sctp_sack_timeout;
+       sp->sackdelay   = net->sctp.sack_timeout;
        sp->sackfreq    = 2;
        sp->param_flags = SPP_HB_ENABLE |
                          SPP_PMTUD_ENABLE |
@@ -3961,10 +3976,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 
        local_bh_disable();
        percpu_counter_inc(&sctp_sockets_allocated);
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       if (sctp_default_auto_asconf) {
+       sock_prot_inuse_add(net, sk->sk_prot, 1);
+       if (net->sctp.default_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
-                   &sctp_auto_asconf_splist);
+                   &net->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        } else
                sp->do_auto_asconf = 0;
@@ -4011,6 +4026,7 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
  */
 SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
 
@@ -4022,7 +4038,7 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
                if (!list_empty(&ep->asocs)) {
                        asoc = list_entry(ep->asocs.next,
                                          struct sctp_association, asocs);
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
                }
        }
 }
@@ -4653,9 +4669,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
        union sctp_addr temp;
        int cnt = 0;
        int addrlen;
+       struct net *net = sock_net(sk);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+       list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
 
@@ -5299,12 +5316,13 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmacalgo  __user *p = (void __user *)optval;
        struct sctp_hmac_algo_param *hmacs;
        __u16 data_len = 0;
        u32 num_idents;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
@@ -5328,10 +5346,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 static int sctp_getsockopt_active_key(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authkeyid))
@@ -5360,6 +5379,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunks __user *p = (void __user *)optval;
        struct sctp_authchunks val;
        struct sctp_association *asoc;
@@ -5367,7 +5387,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        u32    num_chunks = 0;
        char __user *to;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authchunks))
@@ -5403,6 +5423,7 @@ num:
 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunks __user *p = (void __user *)optval;
        struct sctp_authchunks val;
        struct sctp_association *asoc;
@@ -5410,7 +5431,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        u32    num_chunks = 0;
        char __user *to;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authchunks))
@@ -5769,7 +5790,7 @@ static void sctp_unhash(struct sock *sk)
  * a fastreuse flag (FIXME: NPI ipg).
  */
 static struct sctp_bind_bucket *sctp_bucket_create(
-       struct sctp_bind_hashbucket *head, unsigned short snum);
+       struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
 
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
@@ -5799,11 +5820,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                                rover = low;
                        if (inet_is_reserved_local_port(rover))
                                continue;
-                       index = sctp_phashfn(rover);
+                       index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
                        sctp_spin_lock(&head->lock);
                        sctp_for_each_hentry(pp, node, &head->chain)
-                               if (pp->port == rover)
+                               if ((pp->port == rover) &&
+                                   net_eq(sock_net(sk), pp->net))
                                        goto next;
                        break;
                next:
@@ -5827,10 +5849,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                 * to the port number (snum) - we detect that with the
                 * port iterator, pp being NULL.
                 */
-               head = &sctp_port_hashtable[sctp_phashfn(snum)];
+               head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
                sctp_spin_lock(&head->lock);
                sctp_for_each_hentry(pp, node, &head->chain) {
-                       if (pp->port == snum)
+                       if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
                                goto pp_found;
                }
        }
@@ -5881,7 +5903,7 @@ pp_found:
 pp_not_found:
        /* If there was a hash table miss, create a new port.  */
        ret = 1;
-       if (!pp && !(pp = sctp_bucket_create(head, snum)))
+       if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
                goto fail_unlock;
 
        /* In either case (hit or miss), make sure fastreuse is 1 only
@@ -6113,7 +6135,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
  ********************************************************************/
 
 static struct sctp_bind_bucket *sctp_bucket_create(
-       struct sctp_bind_hashbucket *head, unsigned short snum)
+       struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
 {
        struct sctp_bind_bucket *pp;
 
@@ -6123,6 +6145,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
                pp->port = snum;
                pp->fastreuse = 0;
                INIT_HLIST_HEAD(&pp->owner);
+               pp->net = net;
                hlist_add_head(&pp->node, &head->chain);
        }
        return pp;
@@ -6142,7 +6165,8 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
 static inline void __sctp_put_port(struct sock *sk)
 {
        struct sctp_bind_hashbucket *head =
-               &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)];
+               &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+                                                 inet_sk(sk)->inet_num)];
        struct sctp_bind_bucket *pp;
 
        sctp_spin_lock(&head->lock);
@@ -6809,7 +6833,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
        newsp->hmac = NULL;
 
        /* Hook this new socket in to the bind_hash list. */
-       head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)];
+       head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+                                                inet_sk(oldsk)->inet_num)];
        sctp_local_bh_disable();
        sctp_spin_lock(&head->lock);
        pp = sctp_sk(oldsk)->bind_hash;
index 2b2bfe933ff14413aa4970391eb25d038ff3d90a..70e3ba5cb50b319319e60c7bfa6fae69bc5c1fed 100644 (file)
@@ -63,9 +63,35 @@ extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
 static ctl_table sctp_table[] = {
+       {
+               .procname       = "sctp_mem",
+               .data           = &sysctl_sctp_mem,
+               .maxlen         = sizeof(sysctl_sctp_mem),
+               .mode           = 0644,
+               .proc_handler   = proc_doulongvec_minmax
+       },
+       {
+               .procname       = "sctp_rmem",
+               .data           = &sysctl_sctp_rmem,
+               .maxlen         = sizeof(sysctl_sctp_rmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sctp_wmem",
+               .data           = &sysctl_sctp_wmem,
+               .maxlen         = sizeof(sysctl_sctp_wmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+
+       { /* sentinel */ }
+};
+
+static ctl_table sctp_net_table[] = {
        {
                .procname       = "rto_initial",
-               .data           = &sctp_rto_initial,
+               .data           = &init_net.sctp.rto_initial,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -74,7 +100,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rto_min",
-               .data           = &sctp_rto_min,
+               .data           = &init_net.sctp.rto_min,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -83,7 +109,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rto_max",
-               .data           = &sctp_rto_max,
+               .data           = &init_net.sctp.rto_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -91,17 +117,22 @@ static ctl_table sctp_table[] = {
                .extra2         = &timer_max
        },
        {
-               .procname       = "valid_cookie_life",
-               .data           = &sctp_valid_cookie_life,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &timer_max
+               .procname       = "rto_alpha_exp_divisor",
+               .data           = &init_net.sctp.rto_alpha,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "rto_beta_exp_divisor",
+               .data           = &init_net.sctp.rto_beta,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_burst",
-               .data           = &sctp_max_burst,
+               .data           = &init_net.sctp.max_burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -109,31 +140,42 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "association_max_retrans",
-               .data           = &sctp_max_retrans_association,
+               .procname       = "cookie_preserve_enable",
+               .data           = &init_net.sctp.cookie_preserve_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "valid_cookie_life",
+               .data           = &init_net.sctp.valid_cookie_life,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &int_max
+               .extra1         = &one,
+               .extra2         = &timer_max
        },
        {
-               .procname       = "sndbuf_policy",
-               .data           = &sctp_sndbuf_policy,
+               .procname       = "sack_timeout",
+               .data           = &init_net.sctp.sack_timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &sack_timer_min,
+               .extra2         = &sack_timer_max,
        },
        {
-               .procname       = "rcvbuf_policy",
-               .data           = &sctp_rcvbuf_policy,
-               .maxlen         = sizeof(int),
+               .procname       = "hb_interval",
+               .data           = &init_net.sctp.hb_interval,
+               .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &timer_max
        },
        {
-               .procname       = "path_max_retrans",
-               .data           = &sctp_max_retrans_path,
+               .procname       = "association_max_retrans",
+               .data           = &init_net.sctp.max_retrans_association,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -141,17 +183,17 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "pf_retrans",
-               .data           = &sctp_pf_retrans,
+               .procname       = "path_max_retrans",
+               .data           = &init_net.sctp.max_retrans_path,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &one,
                .extra2         = &int_max
        },
        {
                .procname       = "max_init_retransmits",
-               .data           = &sctp_max_retrans_init,
+               .data           = &init_net.sctp.max_retrans_init,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -159,103 +201,66 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "hb_interval",
-               .data           = &sctp_hb_interval,
-               .maxlen         = sizeof(unsigned int),
+               .procname       = "pf_retrans",
+               .data           = &init_net.sctp.pf_retrans,
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &timer_max
+               .extra1         = &zero,
+               .extra2         = &int_max
        },
        {
-               .procname       = "cookie_preserve_enable",
-               .data           = &sctp_cookie_preserve_enable,
+               .procname       = "sndbuf_policy",
+               .data           = &init_net.sctp.sndbuf_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "rto_alpha_exp_divisor",
-               .data           = &sctp_rto_alpha,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "rto_beta_exp_divisor",
-               .data           = &sctp_rto_beta,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "addip_enable",
-               .data           = &sctp_addip_enable,
+               .procname       = "rcvbuf_policy",
+               .data           = &init_net.sctp.rcvbuf_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "default_auto_asconf",
-               .data           = &sctp_default_auto_asconf,
+               .data           = &init_net.sctp.default_auto_asconf,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "prsctp_enable",
-               .data           = &sctp_prsctp_enable,
+               .procname       = "addip_enable",
+               .data           = &init_net.sctp.addip_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "sack_timeout",
-               .data           = &sctp_sack_timeout,
+               .procname       = "addip_noauth_enable",
+               .data           = &init_net.sctp.addip_noauth,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &sack_timer_min,
-               .extra2         = &sack_timer_max,
-       },
-       {
-               .procname       = "sctp_mem",
-               .data           = &sysctl_sctp_mem,
-               .maxlen         = sizeof(sysctl_sctp_mem),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax
-       },
-       {
-               .procname       = "sctp_rmem",
-               .data           = &sysctl_sctp_rmem,
-               .maxlen         = sizeof(sysctl_sctp_rmem),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "sctp_wmem",
-               .data           = &sysctl_sctp_wmem,
-               .maxlen         = sizeof(sysctl_sctp_wmem),
-               .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "auth_enable",
-               .data           = &sctp_auth_enable,
+               .procname       = "prsctp_enable",
+               .data           = &init_net.sctp.prsctp_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "addip_noauth_enable",
-               .data           = &sctp_addip_noauth,
+               .procname       = "auth_enable",
+               .data           = &init_net.sctp.auth_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "addr_scope_policy",
-               .data           = &sctp_scope_policy,
+               .data           = &init_net.sctp.scope_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -264,7 +269,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rwnd_update_shift",
-               .data           = &sctp_rwnd_upd_shift,
+               .data           = &init_net.sctp.rwnd_upd_shift,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
@@ -273,7 +278,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "max_autoclose",
-               .data           = &sctp_max_autoclose,
+               .data           = &init_net.sctp.max_autoclose,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = &proc_doulongvec_minmax,
@@ -284,6 +289,27 @@ static ctl_table sctp_table[] = {
        { /* sentinel */ }
 };
 
+int sctp_sysctl_net_register(struct net *net)
+{
+       struct ctl_table *table;
+       int i;
+
+       table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       for (i = 0; table[i].data; i++)
+               table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+
+       net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+       return 0;
+}
+
+void sctp_sysctl_net_unregister(struct net *net)
+{
+       unregister_net_sysctl_table(net->sctp.sysctl_header);
+}
+
 static struct ctl_table_header * sctp_sysctl_header;
 
 /* Sysctl registration.  */
index c97472b248a2b257972cd9e4a353e89874ad87aa..953c21e4af977a752362187976e84b578bdb085c 100644 (file)
@@ -59,7 +59,8 @@
 /* 1st Level Abstractions.  */
 
 /* Initialize a new transport from provided memory.  */
-static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
+static struct sctp_transport *sctp_transport_init(struct net *net,
+                                                 struct sctp_transport *peer,
                                                  const union sctp_addr *addr,
                                                  gfp_t gfp)
 {
@@ -76,7 +77,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
         * given destination transport address, set RTO to the protocol
         * parameter 'RTO.Initial'.
         */
-       peer->rto = msecs_to_jiffies(sctp_rto_initial);
+       peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
 
        peer->last_time_heard = jiffies;
        peer->last_time_ecne_reduced = jiffies;
@@ -86,8 +87,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
                            SPP_SACKDELAY_ENABLE;
 
        /* Initialize the default path max_retrans.  */
-       peer->pathmaxrxt  = sctp_max_retrans_path;
-       peer->pf_retrans  = sctp_pf_retrans;
+       peer->pathmaxrxt  = net->sctp.max_retrans_path;
+       peer->pf_retrans  = net->sctp.pf_retrans;
 
        INIT_LIST_HEAD(&peer->transmitted);
        INIT_LIST_HEAD(&peer->send_ready);
@@ -109,7 +110,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 }
 
 /* Allocate and initialize a new transport.  */
-struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
+struct sctp_transport *sctp_transport_new(struct net *net,
+                                         const union sctp_addr *addr,
                                          gfp_t gfp)
 {
        struct sctp_transport *transport;
@@ -118,7 +120,7 @@ struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
        if (!transport)
                goto fail;
 
-       if (!sctp_transport_init(transport, addr, gfp))
+       if (!sctp_transport_init(net, transport, addr, gfp))
                goto fail_init;
 
        transport->malloced = 1;
@@ -316,6 +318,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
        SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);
 
        if (tp->rttvar || tp->srtt) {
+               struct net *net = sock_net(tp->asoc->base.sk);
                /* 6.3.1 C3) When a new RTT measurement R' is made, set
                 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
                 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
@@ -327,10 +330,10 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
                 * For example, assuming the default value of RTO.Alpha of
                 * 1/8, rto_alpha would be expressed as 3.
                 */
-               tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
-                       + ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
-               tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
-                       + (rtt >> sctp_rto_alpha);
+               tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
+                       + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+               tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+                       + (rtt >> net->sctp.rto_alpha);
        } else {
                /* 6.3.1 C2) When the first RTT measurement R is made, set
                 * SRTT <- R, RTTVAR <- R/2.
index f5a6a4f4faf721af4874538093cb003f4efc202c..360d8697b95c33408d6a4913b9b1d497d27e5ee7 100644 (file)
@@ -326,7 +326,9 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+       struct sk_buff_head *queue, struct sk_buff *f_frag,
+       struct sk_buff *l_frag)
 {
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
@@ -394,7 +396,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
        }
 
        event = sctp_skb2event(f_frag);
-       SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+       SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 
        return event;
 }
@@ -493,7 +495,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
-                       retval = sctp_make_reassembled_event(&ulpq->reasm,
+                       retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+                                                            &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
@@ -503,7 +506,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 done:
        return retval;
 found:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                            &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
@@ -563,7 +567,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                       &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;
 
@@ -655,7 +660,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                       &ulpq->reasm, first_frag, last_frag);
        return retval;
 }
 
index edc3c4af9085362c7227e31babfc19a489bf9cf6..80dc7e84b046358581f454ec87993d4195e6edc3 100644 (file)
@@ -88,6 +88,7 @@
 #include <linux/nsproxy.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/xattr.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -346,7 +347,8 @@ static struct file_system_type sock_fs_type = {
  *     but we take care of internal coherence yet.
  */
 
-static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
+static int sock_alloc_file(struct socket *sock, struct file **f, int flags,
+                          const char *dname)
 {
        struct qstr name = { .name = "" };
        struct path path;
@@ -357,6 +359,13 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
        if (unlikely(fd < 0))
                return fd;
 
+       if (dname) {
+               name.name = dname;
+               name.len = strlen(name.name);
+       } else if (sock->sk) {
+               name.name = sock->sk->sk_prot_creator->name;
+               name.len = strlen(name.name);
+       }
        path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
        if (unlikely(!path.dentry)) {
                put_unused_fd(fd);
@@ -389,7 +398,7 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
 int sock_map_fd(struct socket *sock, int flags)
 {
        struct file *newfile;
-       int fd = sock_alloc_file(sock, &newfile, flags);
+       int fd = sock_alloc_file(sock, &newfile, flags, NULL);
 
        if (likely(fd >= 0))
                fd_install(fd, newfile);
@@ -455,6 +464,68 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
        return NULL;
 }
 
+#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
+#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
+#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
+static ssize_t sockfs_getxattr(struct dentry *dentry,
+                              const char *name, void *value, size_t size)
+{
+       const char *proto_name;
+       size_t proto_size;
+       int error;
+
+       error = -ENODATA;
+       if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
+               proto_name = dentry->d_name.name;
+               proto_size = strlen(proto_name);
+
+               if (value) {
+                       error = -ERANGE;
+                       if (proto_size + 1 > size)
+                               goto out;
+
+                       strncpy(value, proto_name, proto_size + 1);
+               }
+               error = proto_size + 1;
+       }
+
+out:
+       return error;
+}
+
+static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
+                               size_t size)
+{
+       ssize_t len;
+       ssize_t used = 0;
+
+       len = security_inode_listsecurity(dentry->d_inode, buffer, size);
+       if (len < 0)
+               return len;
+       used += len;
+       if (buffer) {
+               if (size < used)
+                       return -ERANGE;
+               buffer += len;
+       }
+
+       len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
+       used += len;
+       if (buffer) {
+               if (size < used)
+                       return -ERANGE;
+               memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
+               buffer += len;
+       }
+
+       return used;
+}
+
+static const struct inode_operations sockfs_inode_ops = {
+       .getxattr = sockfs_getxattr,
+       .listxattr = sockfs_listxattr,
+};
+
 /**
  *     sock_alloc      -       allocate a socket
  *
@@ -479,6 +550,7 @@ static struct socket *sock_alloc(void)
        inode->i_mode = S_IFSOCK | S_IRWXUGO;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
+       inode->i_op = &sockfs_inode_ops;
 
        this_cpu_add(sockets_in_use, 1);
        return sock;
@@ -1394,13 +1466,13 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
        if (err < 0)
                goto out_release_both;
 
-       fd1 = sock_alloc_file(sock1, &newfile1, flags);
+       fd1 = sock_alloc_file(sock1, &newfile1, flags, NULL);
        if (unlikely(fd1 < 0)) {
                err = fd1;
                goto out_release_both;
        }
 
-       fd2 = sock_alloc_file(sock2, &newfile2, flags);
+       fd2 = sock_alloc_file(sock2, &newfile2, flags, NULL);
        if (unlikely(fd2 < 0)) {
                err = fd2;
                fput(newfile1);
@@ -1536,7 +1608,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
         */
        __module_get(newsock->ops->owner);
 
-       newfd = sock_alloc_file(newsock, &newfile, flags);
+       newfd = sock_alloc_file(newsock, &newfile, flags,
+                               sock->sk->sk_prot_creator->name);
        if (unlikely(newfd < 0)) {
                err = newfd;
                sock_release(newsock);
@@ -2527,12 +2600,6 @@ static int __init sock_init(void)
        if (err)
                goto out;
 
-       /*
-        *      Initialize sock SLAB cache.
-        */
-
-       sk_init();
-
        /*
         *      Initialize skbuff SLAB cache
         */
index 09e71241265ddf11ffec7ac0505c6377396f247b..4ec5c80e8a7ca0b20c7291db9641636f4dc0a3a6 100644 (file)
@@ -48,21 +48,6 @@ struct tipc_bearer tipc_bearers[MAX_BEARERS];
 
 static void bearer_disable(struct tipc_bearer *b_ptr);
 
-/**
- * media_name_valid - validate media name
- *
- * Returns 1 if media name is valid, otherwise 0.
- */
-static int media_name_valid(const char *name)
-{
-       u32 len;
-
-       len = strlen(name);
-       if ((len + 1) > TIPC_MAX_MEDIA_NAME)
-               return 0;
-       return strspn(name, tipc_alphabet) == len;
-}
-
 /**
  * tipc_media_find - locates specified media object by name
  */
@@ -102,7 +87,7 @@ int tipc_register_media(struct tipc_media *m_ptr)
 
        write_lock_bh(&tipc_net_lock);
 
-       if (!media_name_valid(m_ptr->name))
+       if ((strlen(m_ptr->name) + 1) > TIPC_MAX_MEDIA_NAME)
                goto exit;
        if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
            !m_ptr->bcast_addr.broadcast)
@@ -206,9 +191,7 @@ static int bearer_name_validate(const char *name,
 
        /* validate component parts of bearer name */
        if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
-           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
-           (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
-           (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
                return 0;
 
        /* return bearer name components, if necessary */
index a056a3852f71f0a63109c7fe5188f8117e7144c7..f67866c765dd574130bb17d5476c8c9723d4612a 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2012, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -208,36 +208,6 @@ static struct sk_buff *cfg_set_remote_mng(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_max_publications(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value < 1 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max publications must be 1-65535)");
-       tipc_max_publications = value;
-       return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_subscriptions(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value < 1 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max subscriptions must be 1-65535");
-       tipc_max_subscriptions = value;
-       return tipc_cfg_reply_none();
-}
-
 static struct sk_buff *cfg_set_max_ports(void)
 {
        u32 value;
@@ -357,12 +327,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_MAX_PORTS:
                rep_tlv_buf = cfg_set_max_ports();
                break;
-       case TIPC_CMD_SET_MAX_PUBL:
-               rep_tlv_buf = cfg_set_max_publications();
-               break;
-       case TIPC_CMD_SET_MAX_SUBSCR:
-               rep_tlv_buf = cfg_set_max_subscriptions();
-               break;
        case TIPC_CMD_SET_NETID:
                rep_tlv_buf = cfg_set_netid();
                break;
@@ -372,12 +336,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_PORTS:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
                break;
-       case TIPC_CMD_GET_MAX_PUBL:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
-               break;
-       case TIPC_CMD_GET_MAX_SUBSCR:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
-               break;
        case TIPC_CMD_GET_NETID:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
                break;
@@ -393,6 +351,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_CLUSTERS:
        case TIPC_CMD_SET_MAX_NODES:
        case TIPC_CMD_GET_MAX_NODES:
+       case TIPC_CMD_SET_MAX_SUBSCR:
+       case TIPC_CMD_GET_MAX_SUBSCR:
+       case TIPC_CMD_SET_MAX_PUBL:
+       case TIPC_CMD_GET_MAX_PUBL:
        case TIPC_CMD_SET_LOG_SIZE:
        case TIPC_CMD_DUMP_LOG:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
index 6586eac6a50eb5508447c8d505b67d0d261d1f00..bfe8af88469a95b5012d1cb34e3e9415120a2808 100644 (file)
 
 
 /* global variables used by multiple sub-systems within TIPC */
-int tipc_random;
-
-const char tipc_alphabet[] =
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
+int tipc_random __read_mostly;
 
 /* configurable TIPC parameters */
-u32 tipc_own_addr;
-int tipc_max_ports;
-int tipc_max_subscriptions;
-int tipc_max_publications;
-int tipc_net_id;
-int tipc_remote_management;
+u32 tipc_own_addr __read_mostly;
+int tipc_max_ports __read_mostly;
+int tipc_net_id __read_mostly;
+int tipc_remote_management __read_mostly;
 
 
 /**
@@ -101,9 +96,8 @@ int tipc_core_start_net(unsigned long addr)
 {
        int res;
 
-       res = tipc_net_start(addr);
-       if (!res)
-               res = tipc_eth_media_start();
+       tipc_net_start(addr);
+       res = tipc_eth_media_start();
        if (res)
                tipc_core_stop_net();
        return res;
@@ -160,8 +154,6 @@ static int __init tipc_init(void)
 
        tipc_own_addr = 0;
        tipc_remote_management = 1;
-       tipc_max_publications = 10000;
-       tipc_max_subscriptions = 2000;
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
index fd42e106c18539152822d1bfdceefc25a036b164..0207db04179a00feecf6f2cc04ded91f40f1db45 100644 (file)
@@ -60,7 +60,9 @@
 
 #define TIPC_MOD_VER "2.0.0"
 
-#define ULTRA_STRING_MAX_LEN 32768
+#define ULTRA_STRING_MAX_LEN   32768
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS  65535
 
 struct tipc_msg;       /* msg.h */
 
@@ -74,19 +76,15 @@ int tipc_snprintf(char *buf, int len, const char *fmt, ...);
 /*
  * Global configuration variables
  */
-extern u32 tipc_own_addr;
-extern int tipc_max_ports;
-extern int tipc_max_subscriptions;
-extern int tipc_max_publications;
-extern int tipc_net_id;
-extern int tipc_remote_management;
+extern u32 tipc_own_addr __read_mostly;
+extern int tipc_max_ports __read_mostly;
+extern int tipc_net_id __read_mostly;
+extern int tipc_remote_management __read_mostly;
 
 /*
  * Other global variables
  */
-extern int tipc_random;
-extern const char tipc_alphabet[];
-
+extern int tipc_random __read_mostly;
 
 /*
  * Routines available to privileged subsystems
index 90ac9bfa7abb2d593d30580a16bfd5b78812753f..2132c1ef2951aa3c907e0650805f41aa88448a9b 100644 (file)
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
 struct eth_bearer {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
+       struct work_struct setup;
        struct work_struct cleanup;
 };
 
 static struct tipc_media eth_media_info;
 static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
-static struct notifier_block notifier;
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt,
+                             void *dv);
+/*
+ * Network device notifier info
+ */
+static struct notifier_block notifier = {
+       .notifier_call  = recv_notification,
+       .priority       = 0
+};
 
 /**
  * eth_media_addr_set - initialize Ethernet media address structure
@@ -133,6 +144,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
        return 0;
 }
 
+/**
+ * setup_bearer - setup association between Ethernet bearer and interface
+ */
+static void setup_bearer(struct work_struct *work)
+{
+       struct eth_bearer *eb_ptr =
+               container_of(work, struct eth_bearer, setup);
+
+       dev_add_pack(&eb_ptr->tipc_packet_type);
+}
+
 /**
  * enable_bearer - attach TIPC bearer to an Ethernet interface
  */
@@ -173,7 +195,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        eb_ptr->tipc_packet_type.func = recv_msg;
        eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
        INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-       dev_add_pack(&eb_ptr->tipc_packet_type);
+       INIT_WORK(&eb_ptr->setup, setup_bearer);
+       schedule_work(&eb_ptr->setup);
 
        /* Associate TIPC bearer with Ethernet bearer */
        eb_ptr->bearer = tb_ptr;
@@ -357,8 +380,6 @@ int tipc_eth_media_start(void)
        if (res)
                return res;
 
-       notifier.notifier_call = &recv_notification;
-       notifier.priority = 0;
        res = register_netdevice_notifier(&notifier);
        if (!res)
                eth_started = 1;
index 7a52d3922f3c2bde5b220cc96db7964fbe566e7a..111ff8300ae52ed43226f3ec8ab079bdb2e00b9c 100644 (file)
@@ -45,7 +45,7 @@ struct queue_item {
 static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled;
+static int handler_enabled __read_mostly;
 
 static void process_signal_queue(unsigned long dummy);
 
index 1c1e6151875e6a16c93096063b2065a419dd5e0d..a79c755cb41714bf40c66de615ce6d0cc737cb3b 100644 (file)
@@ -210,9 +210,7 @@ static int link_name_validate(const char *name,
            (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
            (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
            (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
-           (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
-           (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
                return 0;
 
        /* return link name components, if necessary */
index 360c478b0b533511b344cf143c4c42197c9ccd76..46754779fd3d78537faab41c07c1758632cf78e4 100644 (file)
@@ -41,7 +41,7 @@
 #include "subscr.h"
 #include "port.h"
 
-static int tipc_nametbl_size = 1024;           /* must be a power of 2 */
+#define TIPC_NAMETBL_SIZE 1024         /* must be a power of 2 */
 
 /**
  * struct name_info - name sequence publication info
@@ -114,7 +114,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
 
 static int hash(int x)
 {
-       return x & (tipc_nametbl_size - 1);
+       return x & (TIPC_NAMETBL_SIZE - 1);
 }
 
 /**
@@ -667,9 +667,9 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 {
        struct publication *publ;
 
-       if (table.local_publ_count >= tipc_max_publications) {
+       if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
-                       tipc_max_publications);
+                       TIPC_MAX_PUBLICATIONS);
                return NULL;
        }
 
@@ -783,7 +783,7 @@ static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
                if (!list_is_last(&publ->zone_list, &info->zone_list))
                        ret += tipc_snprintf(buf + ret, len - ret,
                                             "\n%33s", " ");
-       };
+       }
 
        ret += tipc_snprintf(buf + ret, len - ret, "\n");
        return ret;
@@ -871,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                ret += nametbl_header(buf, len, depth);
                lowbound = 0;
                upbound = ~0;
-               for (i = 0; i < tipc_nametbl_size; i++) {
+               for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                        seq_head = &table.types[i];
                        hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
@@ -935,7 +935,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
 
 int tipc_nametbl_init(void)
 {
-       table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
+       table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
                              GFP_ATOMIC);
        if (!table.types)
                return -ENOMEM;
@@ -953,7 +953,7 @@ void tipc_nametbl_stop(void)
 
        /* Verify name table is empty, then release it */
        write_lock_bh(&tipc_nametbl_lock);
-       for (i = 0; i < tipc_nametbl_size; i++) {
+       for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                if (hlist_empty(&table.types[i]))
                        continue;
                pr_err("nametbl_stop(): orphaned hash chain detected\n");
index 5b5cea259caf5efde1151318f498ca3924099eb1..7d305ecc09c2bf053376bb147c5cf917113022ae 100644 (file)
@@ -171,7 +171,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
        tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
-int tipc_net_start(u32 addr)
+void tipc_net_start(u32 addr)
 {
        char addr_string[16];
 
@@ -187,7 +187,6 @@ int tipc_net_start(u32 addr)
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
                tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
-       return 0;
 }
 
 void tipc_net_stop(void)
index 9eb4b9e220ebbb5146a2d455126b6b46d6cf48f6..079daadb3f7286471cd5146798f6b06328bf99ad 100644 (file)
@@ -41,7 +41,7 @@ extern rwlock_t tipc_net_lock;
 
 void tipc_net_route_msg(struct sk_buff *buf);
 
-int tipc_net_start(u32 addr);
+void tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
 #endif
index 47a839df27dc2387b0067ef38228872135860993..6675914dc592cd54b13e6320051f80296a7223c8 100644 (file)
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
                rep_nlh = nlmsg_hdr(rep_buf);
                memcpy(rep_nlh, req_nlh, hdr_space);
                rep_nlh->nlmsg_len = rep_buf->len;
-               genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid);
+               genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
        }
 
        return 0;
index 5ed5965eb0bee40ec7e475d814370426b1e45a88..0f7d0d007e22b9cbe94665c19b1d1ac2d6328f02 100644 (file)
@@ -304,9 +304,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Refuse subscription if global limit exceeded */
-       if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+       if (atomic_read(&topsrv.subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
-                       tipc_max_subscriptions);
+                       TIPC_MAX_SUBSCRIPTIONS);
                subscr_terminate(subscriber);
                return NULL;
        }
index c5ee4ff613641b3f8439f1c9cb6b22d55a1ff7f2..5b5c876c80e9b543bd7b773c1a1f45f83f696e7b 100644 (file)
@@ -441,7 +441,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
        /* ---- Socket is dead now and most probably destroyed ---- */
 
        /*
-        * Fixme: BSD difference: In BSD all sockets connected to use get
+        * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot. In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
@@ -481,7 +481,6 @@ static int unix_listen(struct socket *sock, int backlog)
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        struct pid *old_pid = NULL;
-       const struct cred *old_cred = NULL;
 
        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -503,8 +502,6 @@ static int unix_listen(struct socket *sock, int backlog)
 out_unlock:
        unix_state_unlock(sk);
        put_pid(old_pid);
-       if (old_cred)
-               put_cred(old_cred);
 out:
        return err;
 }
@@ -2060,10 +2057,14 @@ static int unix_shutdown(struct socket *sock, int mode)
        struct sock *sk = sock->sk;
        struct sock *other;
 
-       mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
-
-       if (!mode)
-               return 0;
+       if (mode < SHUT_RD || mode > SHUT_RDWR)
+               return -EINVAL;
+       /* This maps:
+        * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
+        * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
+        * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+        */
+       ++mode;
 
        unix_state_lock(sk);
        sk->sk_shutdown |= mode;
index 750b13408449ac018b3d8ca1bad4def492ffaea6..06748f108a5732e9f847cdffd0dafe0cb996c191 100644 (file)
@@ -110,12 +110,12 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
-               u32 pid, u32 seq, u32 flags, int sk_ino)
+               u32 portid, u32 seq, u32 flags, int sk_ino)
 {
        struct nlmsghdr *nlh;
        struct unix_diag_msg *rep;
 
-       nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+       nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
                        flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -159,7 +159,7 @@ out_nlmsg_trim:
 }
 
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
-               u32 pid, u32 seq, u32 flags)
+               u32 portid, u32 seq, u32 flags)
 {
        int sk_ino;
 
@@ -170,7 +170,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        if (!sk_ino)
                return 0;
 
-       return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
+       return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
 }
 
 static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -200,7 +200,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        if (!(req->udiag_states & (1 << sk->sk_state)))
                                goto next;
                        if (sk_diag_dump(sk, skb, req,
-                                        NETLINK_CB(cb->skb).pid,
+                                        NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq,
                                         NLM_F_MULTI) < 0)
                                goto done;
@@ -267,7 +267,7 @@ again:
        if (!rep)
                goto out;
 
-       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
+       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, req->udiag_ino);
        if (err < 0) {
                nlmsg_free(rep);
@@ -277,7 +277,7 @@ again:
 
                goto again;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
index d355f67d0cdd1ff64ac68f917c2eb8c11e8b1af2..2f876b9ee3443b05efc54445b747e7ee7101e50d 100644 (file)
@@ -105,7 +105,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (!netif_running(wdev->netdev))
+       if (wdev->netdev && !netif_running(wdev->netdev))
                return;
 
        switch (wdev->iftype) {
@@ -143,6 +143,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
        case NL80211_IFTYPE_WDS:
                /* these interface types don't really have a channel */
                return;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (wdev->wiphy->features &
+                               NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
+                       *chanmode = CHAN_MODE_EXCLUSIVE;
+               return;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
                WARN_ON(1);
index dcd64d5b07aadfba26a799506452a9b04fe8e7d3..443d4d7deea299c7e997045d22d8b2b146d2c877 100644 (file)
@@ -230,9 +230,24 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
        rtnl_lock();
        mutex_lock(&rdev->devlist_mtx);
 
-       list_for_each_entry(wdev, &rdev->wdev_list, list)
-               if (wdev->netdev)
+       list_for_each_entry(wdev, &rdev->wdev_list, list) {
+               if (wdev->netdev) {
                        dev_close(wdev->netdev);
+                       continue;
+               }
+               /* otherwise, check iftype */
+               switch (wdev->iftype) {
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       if (!wdev->p2p_started)
+                               break;
+                       rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+                       wdev->p2p_started = false;
+                       rdev->opencount--;
+                       break;
+               default:
+                       break;
+               }
+       }
 
        mutex_unlock(&rdev->devlist_mtx);
        rtnl_unlock();
@@ -407,6 +422,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
                        if (WARN_ON(wiphy->software_iftypes & types))
                                return -EINVAL;
 
+                       /* Only a single P2P_DEVICE can be allowed */
+                       if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+                                   c->limits[j].max > 1))
+                               return -EINVAL;
+
                        cnt += c->limits[j].max;
                        /*
                         * Don't advertise an unsupported type
@@ -734,6 +754,35 @@ static void wdev_cleanup_work(struct work_struct *work)
        dev_put(wdev->netdev);
 }
 
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+       ASSERT_RTNL();
+
+       if (WARN_ON(wdev->netdev))
+               return;
+
+       mutex_lock(&rdev->devlist_mtx);
+       list_del_rcu(&wdev->list);
+       rdev->devlist_generation++;
+
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!wdev->p2p_started)
+                       break;
+               rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+               wdev->p2p_started = false;
+               rdev->opencount--;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+       mutex_unlock(&rdev->devlist_mtx);
+}
+EXPORT_SYMBOL(cfg80211_unregister_wdev);
+
 static struct device_type wiphy_type = {
        .name   = "wlan",
 };
index bc7430b54771af18e903ee1d263ede3b4eb1b78f..a343be4a52bd0e16b0fdb41e565f39f3d705c823 100644 (file)
@@ -55,7 +55,7 @@ struct cfg80211_registered_device {
        int opencount; /* also protected by devlist_mtx */
        wait_queue_head_t dev_wait;
 
-       u32 ap_beacons_nlpid;
+       u32 ap_beacons_nlportid;
 
        /* protected by RTNL only */
        int num_running_ifaces;
index 1cdb1d5e6b0f4bef5cc9f49a7cd499aa8c48924e..8016fee0752b0325a20409b7b1b93b5433c73872 100644 (file)
@@ -612,10 +612,21 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
 }
 EXPORT_SYMBOL(cfg80211_del_sta);
 
+void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
+                         enum nl80211_connect_failed_reason reason,
+                         gfp_t gfp)
+{
+       struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+       nl80211_send_conn_failed_event(rdev, dev, mac_addr, reason, gfp);
+}
+EXPORT_SYMBOL(cfg80211_conn_failed);
+
 struct cfg80211_mgmt_registration {
        struct list_head list;
 
-       u32 nlpid;
+       u32 nlportid;
 
        int match_len;
 
@@ -624,7 +635,7 @@ struct cfg80211_mgmt_registration {
        u8 match[];
 };
 
-int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
+int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
                                u16 frame_type, const u8 *match_data,
                                int match_len)
 {
@@ -672,7 +683,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
 
        memcpy(nreg->match, match_data, match_len);
        nreg->match_len = match_len;
-       nreg->nlpid = snd_pid;
+       nreg->nlportid = snd_portid;
        nreg->frame_type = cpu_to_le16(frame_type);
        list_add(&nreg->list, &wdev->mgmt_registrations);
 
@@ -685,7 +696,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
        return err;
 }
 
-void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
+void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 {
        struct wiphy *wiphy = wdev->wiphy;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
@@ -694,7 +705,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
        spin_lock_bh(&wdev->mgmt_registrations_lock);
 
        list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
-               if (reg->nlpid != nlpid)
+               if (reg->nlportid != nlportid)
                        continue;
 
                if (rdev->ops->mgmt_frame_register) {
@@ -710,8 +721,8 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
 
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
-       if (nlpid == wdev->ap_unexpected_nlpid)
-               wdev->ap_unexpected_nlpid = 0;
+       if (nlportid == wdev->ap_unexpected_nlportid)
+               wdev->ap_unexpected_nlportid = 0;
 }
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -736,7 +747,6 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                          const u8 *buf, size_t len, bool no_cck,
                          bool dont_wait_for_ack, u64 *cookie)
 {
-       struct net_device *dev = wdev->netdev;
        const struct ieee80211_mgmt *mgmt;
        u16 stype;
 
@@ -796,7 +806,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_P2P_GO:
                case NL80211_IFTYPE_AP_VLAN:
-                       if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
+                       if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
                                err = -EINVAL;
                        break;
                case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +819,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                         * cfg80211 doesn't track the stations
                         */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       /*
+                        * fall through, P2P device only supports
+                        * public action frames
+                        */
                default:
                        err = -EOPNOTSUPP;
                        break;
@@ -819,7 +834,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                        return err;
        }
 
-       if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
+       if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
                return -EINVAL;
 
        /* Transmit the Action frame as requested by user space */
@@ -868,7 +883,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
                /* found match! */
 
                /* Indicate the received Action frame to user space */
-               if (nl80211_send_mgmt(rdev, wdev, reg->nlpid,
+               if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
                                      freq, sig_mbm,
                                      buf, len, gfp))
                        continue;
index 1e37dbf00cb3f3850d3785827f896ca09339873b..0418a6d5c1a683f95542c64628e66f487ddea196 100644 (file)
@@ -496,11 +496,11 @@ static bool is_valid_ie_attr(const struct nlattr *attr)
 }
 
 /* message building helper */
-static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
+static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
                                   int flags, u8 cmd)
 {
        /* since there is no private header just add the generic one */
-       return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd);
+       return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd);
 }
 
 static int nl80211_msg_put_channel(struct sk_buff *msg,
@@ -851,7 +851,7 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
-static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
+static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                              struct cfg80211_registered_device *dev)
 {
        void *hdr;
@@ -866,7 +866,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        const struct ieee80211_txrx_stypes *mgmt_stypes =
                                dev->wiphy.mgmt_stypes;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
        if (!hdr)
                return -1;
 
@@ -1100,6 +1100,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
                        goto nla_put_failure;
        }
+       CMD(start_p2p_device, START_P2P_DEVICE);
 
 #ifdef CONFIG_NL80211_TESTMODE
        CMD(testmode_cmd, TESTMODE);
@@ -1266,7 +1267,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (++idx <= start)
                        continue;
-               if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid,
+               if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                       dev) < 0) {
                        idx--;
@@ -1289,7 +1290,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) {
+       if (nl80211_send_wiphy(msg, info->snd_portid, info->snd_seq, 0, dev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
        }
@@ -1735,26 +1736,26 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
               ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
 }
 
-static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
+static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                              struct cfg80211_registered_device *rdev,
                              struct wireless_dev *wdev)
 {
        struct net_device *dev = wdev->netdev;
        void *hdr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_INTERFACE);
        if (!hdr)
                return -1;
 
        if (dev &&
            (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
-            nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
-            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+            nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
                goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
            nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        rdev->devlist_generation ^
                        (cfg80211_rdev_list_generation << 2)))
@@ -1806,7 +1807,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                                if_idx++;
                                continue;
                        }
-                       if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
+                       if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                               rdev, wdev) < 0) {
                                mutex_unlock(&rdev->devlist_mtx);
@@ -1837,7 +1838,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
                               dev, wdev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -2021,8 +2022,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                return PTR_ERR(wdev);
        }
 
-       if (type == NL80211_IFTYPE_MESH_POINT &&
-           info->attrs[NL80211_ATTR_MESH_ID]) {
+       switch (type) {
+       case NL80211_IFTYPE_MESH_POINT:
+               if (!info->attrs[NL80211_ATTR_MESH_ID])
+                       break;
                wdev_lock(wdev);
                BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
                             IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,9 +2034,29 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
                       wdev->mesh_id_up_len);
                wdev_unlock(wdev);
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               /*
+                * P2P Device doesn't have a netdev, so doesn't go
+                * through the netdev notifier and must be added here
+                */
+               mutex_init(&wdev->mtx);
+               INIT_LIST_HEAD(&wdev->event_list);
+               spin_lock_init(&wdev->event_lock);
+               INIT_LIST_HEAD(&wdev->mgmt_registrations);
+               spin_lock_init(&wdev->mgmt_registrations_lock);
+
+               mutex_lock(&rdev->devlist_mtx);
+               wdev->identifier = ++rdev->wdev_id;
+               list_add_rcu(&wdev->list, &rdev->wdev_list);
+               rdev->devlist_generation++;
+               mutex_unlock(&rdev->devlist_mtx);
+               break;
+       default:
+               break;
        }
 
-       if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
                               rdev, wdev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -2168,7 +2191,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_NEW_KEY);
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);
@@ -2746,7 +2769,7 @@ nla_put_failure:
        return false;
 }
 
-static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
+static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags,
                                struct cfg80211_registered_device *rdev,
                                struct net_device *dev,
@@ -2755,7 +2778,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
        void *hdr;
        struct nlattr *sinfoattr, *bss_param;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
        if (!hdr)
                return -1;
 
@@ -2908,7 +2931,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
                        goto out_err;
 
                if (nl80211_send_station(skb,
-                               NETLINK_CB(cb->skb).pid,
+                               NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                dev, netdev, mac_addr,
                                &sinfo) < 0)
@@ -2954,7 +2977,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_station(msg, info->snd_portid, info->snd_seq, 0,
                                 rdev, dev, mac_addr, &sinfo) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -3280,7 +3303,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
        return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr);
 }
 
-static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
+static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags, struct net_device *dev,
                                u8 *dst, u8 *next_hop,
                                struct mpath_info *pinfo)
@@ -3288,7 +3311,7 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
        void *hdr;
        struct nlattr *pinfoattr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
        if (!hdr)
                return -1;
 
@@ -3366,7 +3389,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
                if (err)
                        goto out_err;
 
-               if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid,
+               if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                       netdev, dst, next_hop,
                                       &pinfo) < 0)
@@ -3415,7 +3438,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0,
                                 dev, dst, next_hop, &pinfo) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -3656,7 +3679,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_MESH_CONFIG);
        if (!hdr)
                goto out;
@@ -3975,7 +3998,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_REG);
        if (!hdr)
                goto put_failure;
@@ -4593,7 +4616,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).pid, seq, flags,
+       hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
                             NL80211_CMD_NEW_SCAN_RESULTS);
        if (!hdr)
                return -1;
@@ -4712,14 +4735,14 @@ static int nl80211_dump_scan(struct sk_buff *skb,
        return skb->len;
 }
 
-static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
+static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags, struct net_device *dev,
                                struct survey_info *survey)
 {
        void *hdr;
        struct nlattr *infoattr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags,
+       hdr = nl80211hdr_put(msg, portid, seq, flags,
                             NL80211_CMD_NEW_SURVEY_RESULTS);
        if (!hdr)
                return -ENOMEM;
@@ -4813,7 +4836,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                }
 
                if (nl80211_send_survey(skb,
-                               NETLINK_CB(cb->skb).pid,
+                               NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                netdev,
                                &survey) < 0)
@@ -5428,7 +5451,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        }
 
        while (1) {
-               void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).pid,
+               void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
@@ -5468,7 +5491,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
 
 static struct sk_buff *
 __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
-                             int approxlen, u32 pid, u32 seq, gfp_t gfp)
+                             int approxlen, u32 portid, u32 seq, gfp_t gfp)
 {
        struct sk_buff *skb;
        void *hdr;
@@ -5478,7 +5501,7 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
        if (!skb)
                return NULL;
 
-       hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE);
+       hdr = nl80211hdr_put(skb, portid, seq, 0, NL80211_CMD_TESTMODE);
        if (!hdr) {
                kfree_skb(skb);
                return NULL;
@@ -5508,7 +5531,7 @@ struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
                return NULL;
 
        return __cfg80211_testmode_alloc_skb(rdev, approxlen,
-                               rdev->testmode_info->snd_pid,
+                               rdev->testmode_info->snd_portid,
                                rdev->testmode_info->snd_seq,
                                GFP_KERNEL);
 }
@@ -5846,7 +5869,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_REMAIN_ON_CHANNEL);
 
        if (IS_ERR(hdr)) {
@@ -6055,6 +6078,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6064,7 +6088,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->mgmt_tx)
                return -EOPNOTSUPP;
 
-       return cfg80211_mlme_register_mgmt(wdev, info->snd_pid, frame_type,
+       return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type,
                        nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
                        nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
 }
@@ -6101,6 +6125,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6144,7 +6169,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                if (!msg)
                        return -ENOMEM;
 
-               hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+               hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                                     NL80211_CMD_FRAME);
 
                if (IS_ERR(hdr)) {
@@ -6197,6 +6222,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6260,7 +6286,7 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_POWER_SAVE);
        if (!hdr) {
                err = -ENOBUFS;
@@ -6462,7 +6488,7 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_WOWLAN);
        if (!hdr)
                goto nla_put_failure;
@@ -6736,10 +6762,10 @@ static int nl80211_register_unexpected_frame(struct sk_buff *skb,
            wdev->iftype != NL80211_IFTYPE_P2P_GO)
                return -EINVAL;
 
-       if (wdev->ap_unexpected_nlpid)
+       if (wdev->ap_unexpected_nlportid)
                return -EBUSY;
 
-       wdev->ap_unexpected_nlpid = info->snd_pid;
+       wdev->ap_unexpected_nlportid = info->snd_portid;
        return 0;
 }
 
@@ -6769,7 +6795,7 @@ static int nl80211_probe_client(struct sk_buff *skb,
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_PROBE_CLIENT);
 
        if (IS_ERR(hdr)) {
@@ -6804,10 +6830,72 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
        if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
                return -EOPNOTSUPP;
 
-       if (rdev->ap_beacons_nlpid)
+       if (rdev->ap_beacons_nlportid)
                return -EBUSY;
 
-       rdev->ap_beacons_nlpid = info->snd_pid;
+       rdev->ap_beacons_nlportid = info->snd_portid;
+
+       return 0;
+}
+
+static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev = info->user_ptr[1];
+       int err;
+
+       if (!rdev->ops->start_p2p_device)
+               return -EOPNOTSUPP;
+
+       if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
+       if (wdev->p2p_started)
+               return 0;
+
+       mutex_lock(&rdev->devlist_mtx);
+       err = cfg80211_can_add_interface(rdev, wdev->iftype);
+       mutex_unlock(&rdev->devlist_mtx);
+       if (err)
+               return err;
+
+       err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+       if (err)
+               return err;
+
+       wdev->p2p_started = true;
+       mutex_lock(&rdev->devlist_mtx);
+       rdev->opencount++;
+       mutex_unlock(&rdev->devlist_mtx);
+
+       return 0;
+}
+
+static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev = info->user_ptr[1];
+
+       if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
+       if (!rdev->ops->stop_p2p_device)
+               return -EOPNOTSUPP;
+
+       if (!wdev->p2p_started)
+               return 0;
+
+       rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+       wdev->p2p_started = false;
+
+       mutex_lock(&rdev->devlist_mtx);
+       rdev->opencount--;
+       mutex_unlock(&rdev->devlist_mtx);
+
+       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
+               rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, true);
+       }
 
        return 0;
 }
@@ -6819,7 +6907,7 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 #define NL80211_FLAG_NEED_NETDEV_UP    (NL80211_FLAG_NEED_NETDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_NEED_WDEV         0x10
-/* If a netdev is associated, it must be UP */
+/* If a netdev is associated, it must be UP, P2P must be started */
 #define NL80211_FLAG_NEED_WDEV_UP      (NL80211_FLAG_NEED_WDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 
@@ -6880,6 +6968,13 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                        }
 
                        dev_hold(dev);
+               } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
+                       if (!wdev->p2p_started) {
+                               mutex_unlock(&cfg80211_mutex);
+                               if (rtnl)
+                                       rtnl_unlock();
+                               return -ENETDOWN;
+                       }
                }
 
                cfg80211_lock_rdev(rdev);
@@ -7441,7 +7536,22 @@ static struct genl_ops nl80211_ops[] = {
                .internal_flags = NL80211_FLAG_NEED_NETDEV |
                                  NL80211_FLAG_NEED_RTNL,
        },
-
+       {
+               .cmd = NL80211_CMD_START_P2P_DEVICE,
+               .doit = nl80211_start_p2p_device,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_STOP_P2P_DEVICE,
+               .doit = nl80211_stop_p2p_device,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7520,12 +7630,12 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
 static int nl80211_send_scan_msg(struct sk_buff *msg,
                                 struct cfg80211_registered_device *rdev,
                                 struct wireless_dev *wdev,
-                                u32 pid, u32 seq, int flags,
+                                u32 portid, u32 seq, int flags,
                                 u32 cmd)
 {
        void *hdr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
        if (!hdr)
                return -1;
 
@@ -7549,11 +7659,11 @@ static int
 nl80211_send_sched_scan_msg(struct sk_buff *msg,
                            struct cfg80211_registered_device *rdev,
                            struct net_device *netdev,
-                           u32 pid, u32 seq, int flags, u32 cmd)
+                           u32 portid, u32 seq, int flags, u32 cmd)
 {
        void *hdr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
        if (!hdr)
                return -1;
 
@@ -8254,6 +8364,40 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
        nlmsg_free(msg);
 }
 
+void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
+                                   struct net_device *dev, const u8 *mac_addr,
+                                   enum nl80211_connect_failed_reason reason,
+                                   gfp_t gfp)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONN_FAILED);
+       if (!hdr) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
+           nla_put_u32(msg, NL80211_ATTR_CONN_FAILED_REASON, reason))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+                               nl80211_mlme_mcgrp.id, gfp);
+       return;
+
+ nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       nlmsg_free(msg);
+}
+
 static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                                       const u8 *addr, gfp_t gfp)
 {
@@ -8262,9 +8406,9 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
        struct sk_buff *msg;
        void *hdr;
        int err;
-       u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
+       u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
 
-       if (!nlpid)
+       if (!nlportid)
                return false;
 
        msg = nlmsg_new(100, gfp);
@@ -8288,7 +8432,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                return true;
        }
 
-       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
        return true;
 
  nla_put_failure:
@@ -8312,7 +8456,7 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
 }
 
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
-                     struct wireless_dev *wdev, u32 nlpid,
+                     struct wireless_dev *wdev, u32 nlportid,
                      int freq, int sig_dbm,
                      const u8 *buf, size_t len, gfp_t gfp)
 {
@@ -8341,7 +8485,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
 
        genlmsg_end(msg, hdr);
 
-       return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -8696,9 +8840,9 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
        struct sk_buff *msg;
        void *hdr;
-       u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
+       u32 nlportid = ACCESS_ONCE(rdev->ap_beacons_nlportid);
 
-       if (!nlpid)
+       if (!nlportid)
                return;
 
        msg = nlmsg_new(len + 100, gfp);
@@ -8721,7 +8865,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 
        genlmsg_end(msg, hdr);
 
-       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
        return;
 
  nla_put_failure:
@@ -8745,9 +8889,9 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
-                       cfg80211_mlme_unregister_socket(wdev, notify->pid);
-               if (rdev->ap_beacons_nlpid == notify->pid)
-                       rdev->ap_beacons_nlpid = 0;
+                       cfg80211_mlme_unregister_socket(wdev, notify->portid);
+               if (rdev->ap_beacons_nlportid == notify->portid)
+                       rdev->ap_beacons_nlportid = 0;
        }
 
        rcu_read_unlock();
index 9f2616fffb4001958600d72dac222ff7edc7a9b4..f6153516068c30dce59e4283d9abfc0591adebfb 100644 (file)
@@ -91,6 +91,11 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
                                struct net_device *dev, const u8 *mac_addr,
                                gfp_t gfp);
 
+void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
+                                   struct net_device *dev, const u8 *mac_addr,
+                                   enum nl80211_connect_failed_reason reason,
+                                   gfp_t gfp);
+
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                      struct wireless_dev *wdev, u32 nlpid,
                      int freq, int sig_dbm,
index c4ad7958af52e84754b073915d009531299b9864..7d604c06c3dc38d1155366a52f184971be1197e3 100644 (file)
@@ -41,6 +41,8 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
        [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
        [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
        [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+       [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
+       [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
        /*
         * add more here as they are defined in radiotap.h
         */
index 72d170ca340665ea5c893cc1bec2316219abbf7c..3b8cbbc214db563ba962ecda1e49fe6929c263cc 100644 (file)
@@ -510,9 +510,11 @@ static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
  *
  * This lets us know if a specific frequency rule is or is not relevant to
  * a specific frequency's band. Bands are device specific and artificial
- * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
- * safe for now to assume that a frequency rule should not be part of a
- * frequency's band if the start freq or end freq are off by more than 2 GHz.
+ * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
+ * however it is safe for now to assume that a frequency rule should not be
+ * part of a frequency's band if the start freq or end freq are off by more
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * 60 GHz band.
  * This resolution can be lowered and should be considered as we add
  * regulatory rule support for other "bands".
  **/
@@ -520,9 +522,16 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
        u32 freq_khz)
 {
 #define ONE_GHZ_IN_KHZ 1000000
-       if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+       /*
+        * From 802.11ad: directional multi-gigabit (DMG):
+        * Pertaining to operation in a frequency band containing a channel
+        * with the Channel starting frequency above 45 GHz.
+        */
+       u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
+                       10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+       if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
                return true;
-       if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+       if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
                return true;
        return false;
 #undef ONE_GHZ_IN_KHZ
@@ -1955,8 +1964,7 @@ static void restore_regulatory_settings(bool reset_user)
                        if (reg_request->initiator !=
                            NL80211_REGDOM_SET_BY_USER)
                                continue;
-                       list_del(&reg_request->list);
-                       list_add_tail(&reg_request->list, &tmp_reg_req_list);
+                       list_move_tail(&reg_request->list, &tmp_reg_req_list);
                }
        }
        spin_unlock(&reg_requests_lock);
@@ -2015,8 +2023,7 @@ static void restore_regulatory_settings(bool reset_user)
                              "into the queue\n",
                              reg_request->alpha2[0],
                              reg_request->alpha2[1]);
-               list_del(&reg_request->list);
-               list_add_tail(&reg_request->list, &reg_requests_list);
+               list_move_tail(&reg_request->list, &reg_requests_list);
        }
        spin_unlock(&reg_requests_lock);
 
@@ -2201,7 +2208,6 @@ static void print_regdomain_info(const struct ieee80211_regdomain *rd)
 static int __set_regdom(const struct ieee80211_regdomain *rd)
 {
        const struct ieee80211_regdomain *intersected_rd = NULL;
-       struct cfg80211_registered_device *rdev = NULL;
        struct wiphy *request_wiphy;
        /* Some basic sanity checks first */
 
@@ -2313,24 +2319,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                return 0;
        }
 
-       if (!intersected_rd)
-               return -EINVAL;
-
-       rdev = wiphy_to_dev(request_wiphy);
-
-       rdev->country_ie_alpha2[0] = rd->alpha2[0];
-       rdev->country_ie_alpha2[1] = rd->alpha2[1];
-       rdev->env = last_request->country_ie_env;
-
-       BUG_ON(intersected_rd == rd);
-
-       kfree(rd);
-       rd = NULL;
-
-       reset_regdomains(false);
-       cfg80211_regdomain = intersected_rd;
-
-       return 0;
+       return -EINVAL;
 }
 
 
index 848523a2b22f02c9a8975ff942073c0794671b54..9730c9862bdcfd624af15641deefe95c0b12c8de 100644 (file)
@@ -815,7 +815,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
                return NULL;
 
        if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-                   (signal < 0 || signal > 100)))
+                   (signal < 0 || signal > 100)))
                return NULL;
 
        if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
index 994e2f0cc7a8a12fc34cbe61fdee97afde3df10b..ef35f4ef2aa623d16f3556a5e3f4709fba363db4 100644 (file)
@@ -684,22 +684,10 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
 
 const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
 {
-       u8 *end, *pos;
-
-       pos = bss->information_elements;
-       if (pos == NULL)
+       if (bss->information_elements == NULL)
                return NULL;
-       end = pos + bss->len_information_elements;
-
-       while (pos + 1 < end) {
-               if (pos + 2 + pos[1] > end)
-                       break;
-               if (pos[0] == ie)
-                       return pos;
-               pos += 2 + pos[1];
-       }
-
-       return NULL;
+       return cfg80211_find_ie(ie, bss->information_elements,
+                                bss->len_information_elements);
 }
 EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
@@ -812,6 +800,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
        if (otype == NL80211_IFTYPE_AP_VLAN)
                return -EOPNOTSUPP;
 
+       /* cannot change into P2P device type */
+       if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
        if (!rdev->ops->change_virtual_intf ||
            !(rdev->wiphy.interface_modes & (1 << ntype)))
                return -EOPNOTSUPP;
@@ -889,6 +881,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                case NUM_NL80211_IFTYPES:
                        /* not happening */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       WARN_ON(1);
+                       break;
                }
        }
 
@@ -1053,8 +1048,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
                if (wdev_iter == wdev)
                        continue;
-               if (!netif_running(wdev_iter->netdev))
-                       continue;
+               if (wdev_iter->netdev) {
+                       if (!netif_running(wdev_iter->netdev))
+                               continue;
+               } else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+                       if (!wdev_iter->p2p_started)
+                               continue;
+               } else {
+                       WARN_ON(1);
+               }
 
                if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
                        continue;
index b0eb7aa49b60a7c87d242213a4630c5d3402310f..c8717c1d082e702f9b071c480e873b408b400daf 100644 (file)
@@ -478,13 +478,13 @@ void wireless_send_event(struct net_device *      dev,
        if (descr->header_type == IW_HEADER_TYPE_POINT) {
                /* Check if number of token fits within bounds */
                if (wrqu->data.length > descr->max_tokens) {
-                       netdev_err(dev, "(WE) : Wireless Event too big (%d)\n",
-                                  wrqu->data.length);
+                       netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too big (%d)\n",
+                                  cmd, wrqu->data.length);
                        return;
                }
                if (wrqu->data.length < descr->min_tokens) {
-                       netdev_err(dev, "(WE) : Wireless Event too small (%d)\n",
-                                  wrqu->data.length);
+                       netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too small (%d)\n",
+                                  cmd, wrqu->data.length);
                        return;
                }
                /* Calculate extra_len - extra is NULL for restricted events */
index 46550997548c2e2d9cce0db8024a7bd168fb09cd..41eabc46f110d9cb607cb24f8af68054b91bcccd 100644 (file)
@@ -42,13 +42,12 @@ static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
 static struct dst_entry *xfrm_policy_sk_bundles;
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
-static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
-static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
+static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+                                               __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
@@ -95,6 +94,24 @@ bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl
        return false;
 }
 
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+{
+       struct xfrm_policy_afinfo *afinfo;
+
+       if (unlikely(family >= NPROTO))
+               return NULL;
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
+       if (unlikely(!afinfo))
+               rcu_read_unlock();
+       return afinfo;
+}
+
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
+{
+       rcu_read_unlock();
+}
+
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
@@ -2421,7 +2438,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
-       write_lock_bh(&xfrm_policy_afinfo_lock);
+       spin_lock(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
@@ -2442,9 +2459,9 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = xfrm_garbage_collect_deferred;
-               xfrm_policy_afinfo[afinfo->family] = afinfo;
+               rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
        }
-       write_unlock_bh(&xfrm_policy_afinfo_lock);
+       spin_unlock(&xfrm_policy_afinfo_lock);
 
        rtnl_lock();
        for_each_net(net) {
@@ -2477,21 +2494,26 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
-       write_lock_bh(&xfrm_policy_afinfo_lock);
+       spin_lock(&xfrm_policy_afinfo_lock);
        if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
-               else {
-                       struct dst_ops *dst_ops = afinfo->dst_ops;
-                       xfrm_policy_afinfo[afinfo->family] = NULL;
-                       dst_ops->kmem_cachep = NULL;
-                       dst_ops->check = NULL;
-                       dst_ops->negative_advice = NULL;
-                       dst_ops->link_failure = NULL;
-                       afinfo->garbage_collect = NULL;
-               }
+               else
+                       RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
+                                        NULL);
+       }
+       spin_unlock(&xfrm_policy_afinfo_lock);
+       if (!err) {
+               struct dst_ops *dst_ops = afinfo->dst_ops;
+
+               synchronize_rcu();
+
+               dst_ops->kmem_cachep = NULL;
+               dst_ops->check = NULL;
+               dst_ops->negative_advice = NULL;
+               dst_ops->link_failure = NULL;
+               afinfo->garbage_collect = NULL;
        }
-       write_unlock_bh(&xfrm_policy_afinfo_lock);
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
@@ -2500,33 +2522,16 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
 {
        struct xfrm_policy_afinfo *afinfo;
 
-       read_lock_bh(&xfrm_policy_afinfo_lock);
-       afinfo = xfrm_policy_afinfo[AF_INET];
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
 #if IS_ENABLED(CONFIG_IPV6)
-       afinfo = xfrm_policy_afinfo[AF_INET6];
+       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
 #endif
-       read_unlock_bh(&xfrm_policy_afinfo_lock);
-}
-
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
-{
-       struct xfrm_policy_afinfo *afinfo;
-       if (unlikely(family >= NPROTO))
-               return NULL;
-       read_lock(&xfrm_policy_afinfo_lock);
-       afinfo = xfrm_policy_afinfo[family];
-       if (unlikely(!afinfo))
-               read_unlock(&xfrm_policy_afinfo_lock);
-       return afinfo;
-}
-
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
-{
-       read_unlock(&xfrm_policy_afinfo_lock);
+       rcu_read_unlock();
 }
 
 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
index bd2d9841ad59becf39644e09bfd7c6f6dba98567..3459692092ec1f44220d02a4d73d82613896793d 100644 (file)
@@ -166,7 +166,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 int __xfrm_state_delete(struct xfrm_state *x);
 
 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 
 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
 {
@@ -1674,13 +1674,13 @@ void km_state_notify(struct xfrm_state *x, const struct km_event *c)
 EXPORT_SYMBOL(km_policy_notify);
 EXPORT_SYMBOL(km_state_notify);
 
-void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
 {
        struct net *net = xs_net(x);
        struct km_event c;
 
        c.data.hard = hard;
-       c.pid = pid;
+       c.portid = portid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);
 
@@ -1700,7 +1700,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
 
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
-               acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
+               acqret = km->acquire(x, t, pol);
                if (!acqret)
                        err = acqret;
        }
@@ -1726,13 +1726,13 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
 }
 EXPORT_SYMBOL(km_new_mapping);
 
-void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
 {
        struct net *net = xp_net(pol);
        struct km_event c;
 
        c.data.hard = hard;
-       c.pid = pid;
+       c.portid = portid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);
 
index bc542448307a8f6413788159ed6c73f069de3956..421f9844433519eac0c41e24265d2be9c1155b49 100644 (file)
@@ -623,7 +623,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        }
 
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;
 
        km_state_notify(x, &c);
@@ -696,7 +696,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
 
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;
        km_state_notify(x, &c);
 
@@ -847,7 +847,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
                        XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
        if (nlh == NULL)
                return -EMSGSIZE;
@@ -927,7 +927,7 @@ static inline size_t xfrm_spdinfo_msgsize(void)
 }
 
 static int build_spdinfo(struct sk_buff *skb, struct net *net,
-                        u32 pid, u32 seq, u32 flags)
+                        u32 portid, u32 seq, u32 flags)
 {
        struct xfrmk_spdinfo si;
        struct xfrmu_spdinfo spc;
@@ -936,7 +936,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
        int err;
        u32 *f;
 
-       nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
+       nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
        if (nlh == NULL) /* shouldn't really happen ... */
                return -EMSGSIZE;
 
@@ -969,17 +969,17 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct sk_buff *r_skb;
        u32 *flags = nlmsg_data(nlh);
-       u32 spid = NETLINK_CB(skb).pid;
+       u32 sportid = NETLINK_CB(skb).portid;
        u32 seq = nlh->nlmsg_seq;
 
        r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;
 
-       if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
+       if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
                BUG();
 
-       return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
+       return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
 }
 
 static inline size_t xfrm_sadinfo_msgsize(void)
@@ -990,7 +990,7 @@ static inline size_t xfrm_sadinfo_msgsize(void)
 }
 
 static int build_sadinfo(struct sk_buff *skb, struct net *net,
-                        u32 pid, u32 seq, u32 flags)
+                        u32 portid, u32 seq, u32 flags)
 {
        struct xfrmk_sadinfo si;
        struct xfrmu_sadhinfo sh;
@@ -998,7 +998,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
        int err;
        u32 *f;
 
-       nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
+       nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
        if (nlh == NULL) /* shouldn't really happen ... */
                return -EMSGSIZE;
 
@@ -1026,17 +1026,17 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct sk_buff *r_skb;
        u32 *flags = nlmsg_data(nlh);
-       u32 spid = NETLINK_CB(skb).pid;
+       u32 sportid = NETLINK_CB(skb).portid;
        u32 seq = nlh->nlmsg_seq;
 
        r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;
 
-       if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
+       if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
                BUG();
 
-       return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
+       return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
 }
 
 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1056,7 +1056,7 @@ static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (IS_ERR(resp_skb)) {
                err = PTR_ERR(resp_skb);
        } else {
-               err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
+               err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
        }
        xfrm_state_put(x);
 out_noput:
@@ -1137,7 +1137,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
        }
 
-       err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
+       err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
 
 out:
        xfrm_state_put(x);
@@ -1425,7 +1425,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        km_policy_notify(xp, p->dir, &c);
 
        xfrm_pol_put(xp);
@@ -1511,7 +1511,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
                        XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
        if (nlh == NULL)
                return -EMSGSIZE;
@@ -1648,7 +1648,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                        err = PTR_ERR(resp_skb);
                } else {
                        err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
-                                           NETLINK_CB(skb).pid);
+                                           NETLINK_CB(skb).portid);
                }
        } else {
                kuid_t loginuid = audit_get_loginuid(current);
@@ -1665,7 +1665,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                c.data.byid = p->index;
                c.event = nlh->nlmsg_type;
                c.seq = nlh->nlmsg_seq;
-               c.pid = nlh->nlmsg_pid;
+               c.portid = nlh->nlmsg_pid;
                km_policy_notify(xp, p->dir, &c);
        }
 
@@ -1695,7 +1695,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        c.data.proto = p->proto;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.net = net;
        km_state_notify(NULL, &c);
 
@@ -1722,7 +1722,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -1804,11 +1804,11 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        spin_lock_bh(&x->lock);
        c.data.aevent = p->flags;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
 
        if (build_aevent(r_skb, x, &c) < 0)
                BUG();
-       err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
+       err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
        spin_unlock_bh(&x->lock);
        xfrm_state_put(x);
        return err;
@@ -1854,7 +1854,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.data.aevent = XFRM_AE_CU;
        km_state_notify(x, &c);
        err = 0;
@@ -1889,7 +1889,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        c.data.type = type;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.net = net;
        km_policy_notify(NULL, 0, &c);
        return 0;
@@ -1957,7 +1957,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                // reset the timers here?
                WARN(1, "Dont know what to do with soft policy expire\n");
        }
-       km_policy_expired(xp, p->dir, up->hard, current->pid);
+       km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
 out:
        xfrm_pol_put(xp);
@@ -1985,7 +1985,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = -EINVAL;
        if (x->km.state != XFRM_STATE_VALID)
                goto out;
-       km_state_expired(x, ue->hard, current->pid);
+       km_state_expired(x, ue->hard, nlh->nlmsg_pid);
 
        if (ue->hard) {
                kuid_t loginuid = audit_get_loginuid(current);
@@ -2397,7 +2397,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
+       nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2456,7 +2456,7 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
        if (nlh == NULL) {
                kfree_skb(skb);
                return -EMSGSIZE;
@@ -2524,7 +2524,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
@@ -2594,8 +2594,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
 }
 
 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
-                        struct xfrm_tmpl *xt, struct xfrm_policy *xp,
-                        int dir)
+                        struct xfrm_tmpl *xt, struct xfrm_policy *xp)
 {
        __u32 seq = xfrm_get_acqseq();
        struct xfrm_user_acquire *ua;
@@ -2610,7 +2609,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
        memcpy(&ua->id, &x->id, sizeof(ua->id));
        memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
        memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
-       copy_to_user_policy(xp, &ua->policy, dir);
+       copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
        ua->aalgos = xt->aalgos;
        ua->ealgos = xt->ealgos;
        ua->calgos = xt->calgos;
@@ -2632,7 +2631,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
 }
 
 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
-                            struct xfrm_policy *xp, int dir)
+                            struct xfrm_policy *xp)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
@@ -2641,7 +2640,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
        if (skb == NULL)
                return -ENOMEM;
 
-       if (build_acquire(skb, x, xt, xp, dir) < 0)
+       if (build_acquire(skb, x, xt, xp) < 0)
                BUG();
 
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
@@ -2724,7 +2723,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
+       nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2784,7 +2783,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
@@ -2838,7 +2837,7 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
@@ -2991,7 +2990,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
                .input  = xfrm_netlink_rcv,
        };
 
-       nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
+       nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
        if (nlsk == NULL)
                return -ENOMEM;
        net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
index 8a77725423e0848e671a1f5bdb021fa414de6059..14d810ead42078482807666a41902e4cb24161c0 100644 (file)
@@ -113,13 +113,12 @@ static int __init selnl_init(void)
 {
        struct netlink_kernel_cfg cfg = {
                .groups = SELNLGRP_MAX,
+               .flags  = NL_CFG_F_NONROOT_RECV,
        };
 
-       selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
-                                     THIS_MODULE, &cfg);
+       selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
        if (selnl == NULL)
                panic("SELinux:  Cannot create netlink socket.");
-       netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
        return 0;
 }